diff --git a/.golangci.toml b/.golangci.toml index f3f1e8f9e..990b5fe6c 100644 --- a/.golangci.toml +++ b/.golangci.toml @@ -1,3 +1,10 @@ +[run] + deadline = "5m" + + skip-files = [ + "^old/.*", + ] + [linters-settings] [linters-settings.govet] @@ -13,8 +20,8 @@ suggest-new = true [linters-settings.goconst] - min-len = 2.0 - min-occurrences = 2.0 + min-len = 3.0 + min-occurrences = 3.0 [linters-settings.misspell] locale = "US" @@ -22,9 +29,19 @@ [linters] enable-all = true disable = [ - "maligned", - "lll", - "gas", - "dupl", - "prealloc" - ] \ No newline at end of file + "maligned", + "lll", + "gas", + "dupl", + "prealloc", + ] + +[issues] + max-per-linter = 0 + max-same = 0 + exclude = [ + "(.+) is deprecated:", + "cyclomatic complexity (\\d+) of func `\\(\\*Builder\\)\\.buildConstructor` is high", #alt/server/middleware/middlewares.go + "`logger` can be `github.com/containous/traefik/vendor/github.com/stretchr/testify/assert.TestingT`", # alt/middlewares/recovery/recovery.go: + "`fn` can be `net/http.Handler`", # alt/server/alice/chain.go + ] diff --git a/Gopkg.lock b/Gopkg.lock index 16ccf210e..dab6fda7b 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -7,12 +7,6 @@ revision = "056a55f54a6cc77b440b31a56a5e7c3982d32811" version = "v0.22.0" -[[projects]] - branch = "master" - name = "code.cloudfoundry.org/clock" - packages = ["."] - revision = "02e53af36e6c978af692887ed449b74026d76fec" - [[projects]] branch = "master" name = "github.com/ArthurHlt/go-eureka-client" @@ -87,11 +81,6 @@ packages = ["."] revision = "e039e20e500c2c025d9145be375e27cf42a94174" -[[projects]] - name = "github.com/Microsoft/ApplicationInsights-Go" - packages = ["appinsights"] - revision = "98ac7ca026c26818888600ea0d966987aa56f043" - [[projects]] name = "github.com/Microsoft/go-winio" packages = ["."] @@ -277,6 +266,12 @@ packages = ["pathdriver"] revision = "b2b946a77f5973f420514090d6f6dd58b08303f0" +[[projects]] + branch = "containous-fork" + name = "github.com/containous/alice" + packages = ["."] + revision = "d83ebdd94cbdbcd9c6c6a22e1a0cde05e55d9d90" + [[projects]] name = "github.com/containous/flaeg" packages = [ @@ -298,12 +293,6 @@ revision = "66717a0e0ca950c4b6dc8c87b46da0b8495c6e41" version = "v3.1.1" -[[projects]] - name = "github.com/containous/traefik-extra-service-fabric" - packages = ["."] - revision = "6e90a9eef2ac9d320e55d6e994d169673a8d8b0f" - version = "v1.3.0" - [[projects]] name = "github.com/coreos/bbolt" packages = ["."] @@ -801,18 +790,6 @@ revision = "2d474a3089bcfce6b472779be9470a1f0ef3d5e4" version = "v1.3.7" -[[projects]] - branch = "master" - name = "github.com/jjcollinge/logrus-appinsights" - packages = ["."] - revision = "9b66602d496a139e4722bdde32f0f1ac1c12d4a8" - -[[projects]] - branch = "master" - name = "github.com/jjcollinge/servicefabric" - packages = ["."] - revision = "8eebe170fa1ba25d3dfb928b3f86a7313b13b9fe" - [[projects]] name = "github.com/jmespath/go-jmespath" packages = ["."] @@ -1814,6 +1791,6 @@ [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "37e89a543fca153d166cc70fd7fed689f06d894140bf617f69f5f664ffee621e" + inputs-digest = "d4f73c986b64003e14a36894149943e956e0dfa40b8837bfd11bf5fa3ad78c77" solver-name = "gps-cdcl" solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml index cdb1c4781..b2d951ebe 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -19,6 +19,11 @@ # name = "github.com/x/y" # version = "2.4.0" +[prune] + non-go = true + go-tests = true + unused-packages = true + [[constraint]] branch = "master" name = "github.com/ArthurHlt/go-eureka-client" 
@@ -60,13 +65,17 @@ branch = "master" name = "github.com/containous/mux" +[[constraint]] + branch = "containous-fork" + name = "github.com/containous/alice" + [[constraint]] name = "github.com/containous/staert" version = "3.1.1" -[[constraint]] - name = "github.com/containous/traefik-extra-service-fabric" - version = "1.3.0" +#[[constraint]] +# name = "github.com/containous/traefik-extra-service-fabric" +# version = "1.3.0" [[constraint]] name = "github.com/coreos/go-systemd" @@ -111,9 +120,9 @@ name = "github.com/influxdata/influxdb" version = "1.3.7" -[[constraint]] - branch = "master" - name = "github.com/jjcollinge/servicefabric" +#[[constraint]] +# branch = "master" +# name = "github.com/jjcollinge/servicefabric" [[constraint]] branch = "master" @@ -252,11 +261,6 @@ branch = "master" name = "github.com/miekg/dns" -[prune] - non-go = true - go-tests = true - unused-packages = true - [[constraint]] name = "github.com/patrickmn/go-cache" version = "2.1.0" diff --git a/acme/account.go b/acme/account.go index 86db58a1b..97f757665 100644 --- a/acme/account.go +++ b/acme/account.go @@ -15,8 +15,8 @@ import ( "time" "github.com/containous/traefik/log" - acmeprovider "github.com/containous/traefik/provider/acme" - "github.com/containous/traefik/types" + acmeprovider "github.com/containous/traefik/old/provider/acme" + "github.com/containous/traefik/old/types" "github.com/xenolf/lego/acme" ) diff --git a/acme/acme.go b/acme/acme.go index a740edacd..3e941fb11 100644 --- a/acme/acme.go +++ b/acme/acme.go @@ -22,12 +22,11 @@ import ( "github.com/containous/staert" "github.com/containous/traefik/cluster" "github.com/containous/traefik/log" - acmeprovider "github.com/containous/traefik/provider/acme" + acmeprovider "github.com/containous/traefik/old/provider/acme" + "github.com/containous/traefik/old/types" "github.com/containous/traefik/safe" - "github.com/containous/traefik/types" "github.com/containous/traefik/version" "github.com/eapache/channels" - "github.com/sirupsen/logrus" "github.com/xenolf/lego/acme" legolog "github.com/xenolf/lego/log" "github.com/xenolf/lego/providers/dns" @@ -53,8 +52,6 @@ type ACME struct { DNSChallenge *acmeprovider.DNSChallenge `description:"Activate DNS-01 Challenge"` HTTPChallenge *acmeprovider.HTTPChallenge `description:"Activate HTTP-01 Challenge"` TLSChallenge *acmeprovider.TLSChallenge `description:"Activate TLS-ALPN-01 Challenge"` - DNSProvider string `description:"(Deprecated) Activate DNS-01 Challenge"` // Deprecated - DelayDontCheckDNS flaeg.Duration `description:"(Deprecated) Assume DNS propagates after a delay in seconds rather than finding and querying nameservers."` // Deprecated ACMELogging bool `description:"Enable debug logging of ACME actions."` OverrideCertificates bool `description:"Enable to override certificates in key-value store when using storeconfig"` client *acme.Client @@ -73,7 +70,7 @@ func (a *ACME) init() error { acme.UserAgent = fmt.Sprintf("containous-traefik/%s", version.Version) if a.ACMELogging { - legolog.Logger = fmtlog.New(log.WriterLevel(logrus.InfoLevel), "legolog: ", 0) + legolog.Logger = log.WithoutContext() } else { legolog.Logger = fmtlog.New(ioutil.Discard, "", 0) } @@ -744,7 +741,7 @@ func (a *ACME) getValidDomains(domains []string, wildcardAllowed bool) ([]string return nil, fmt.Errorf("unable to generate a wildcard certificate for domain %q from a 'Host' rule", strings.Join(domains, ",")) } - if a.DNSChallenge == nil && len(a.DNSProvider) == 0 { + if a.DNSChallenge == nil { return nil, fmt.Errorf("unable to generate a 
wildcard certificate for domain %q : ACME needs a DNSChallenge", strings.Join(domains, ",")) } if strings.HasPrefix(domains[0], "*.*") { diff --git a/acme/acme_test.go b/acme/acme_test.go index aadfa17b6..e3357fe88 100644 --- a/acme/acme_test.go +++ b/acme/acme_test.go @@ -11,9 +11,9 @@ import ( "testing" "time" - acmeprovider "github.com/containous/traefik/provider/acme" + acmeprovider "github.com/containous/traefik/old/provider/acme" + "github.com/containous/traefik/old/types" "github.com/containous/traefik/tls/generate" - "github.com/containous/traefik/types" "github.com/stretchr/testify/assert" "github.com/xenolf/lego/acme" ) diff --git a/acme/localStore.go b/acme/localStore.go index b6883b38a..ec26f0119 100644 --- a/acme/localStore.go +++ b/acme/localStore.go @@ -6,7 +6,7 @@ import ( "os" "github.com/containous/traefik/log" - "github.com/containous/traefik/provider/acme" + "github.com/containous/traefik/old/provider/acme" ) // LocalStore is a store using a file as storage diff --git a/anonymize/anonymize_config_test.go b/anonymize/anonymize_config_test.go index 175ef9f11..0f0dc7a3b 100644 --- a/anonymize/anonymize_config_test.go +++ b/anonymize/anonymize_config_test.go @@ -8,29 +8,29 @@ import ( "github.com/containous/flaeg/parse" "github.com/containous/traefik/acme" - "github.com/containous/traefik/api" - "github.com/containous/traefik/configuration" - "github.com/containous/traefik/middlewares" - "github.com/containous/traefik/provider" - acmeprovider "github.com/containous/traefik/provider/acme" - "github.com/containous/traefik/provider/boltdb" - "github.com/containous/traefik/provider/consul" - "github.com/containous/traefik/provider/consulcatalog" - "github.com/containous/traefik/provider/docker" - "github.com/containous/traefik/provider/dynamodb" - "github.com/containous/traefik/provider/ecs" - "github.com/containous/traefik/provider/etcd" - "github.com/containous/traefik/provider/eureka" - "github.com/containous/traefik/provider/file" - "github.com/containous/traefik/provider/kubernetes" - "github.com/containous/traefik/provider/kv" - "github.com/containous/traefik/provider/marathon" - "github.com/containous/traefik/provider/mesos" - "github.com/containous/traefik/provider/rancher" - "github.com/containous/traefik/provider/zk" + "github.com/containous/traefik/old/api" + "github.com/containous/traefik/old/configuration" + "github.com/containous/traefik/old/middlewares" + "github.com/containous/traefik/old/provider" + acmeprovider "github.com/containous/traefik/old/provider/acme" + "github.com/containous/traefik/old/provider/boltdb" + "github.com/containous/traefik/old/provider/consul" + "github.com/containous/traefik/old/provider/consulcatalog" + "github.com/containous/traefik/old/provider/docker" + "github.com/containous/traefik/old/provider/dynamodb" + "github.com/containous/traefik/old/provider/ecs" + "github.com/containous/traefik/old/provider/etcd" + "github.com/containous/traefik/old/provider/eureka" + "github.com/containous/traefik/old/provider/file" + "github.com/containous/traefik/old/provider/kubernetes" + "github.com/containous/traefik/old/provider/kv" + "github.com/containous/traefik/old/provider/marathon" + "github.com/containous/traefik/old/provider/mesos" + "github.com/containous/traefik/old/provider/rancher" + "github.com/containous/traefik/old/provider/zk" + "github.com/containous/traefik/old/types" "github.com/containous/traefik/safe" traefiktls "github.com/containous/traefik/tls" - "github.com/containous/traefik/types" "github.com/elazarl/go-bindata-assetfs" 
"github.com/thoas/stats" ) @@ -173,15 +173,14 @@ func TestDo_globalConfiguration(t *testing.T) { SANs: []string{"Domains acme SANs 1", "Domains acme SANs 2", "Domains acme SANs 3"}, }, }, - Storage: "Storage", - StorageFile: "StorageFile", - OnDemand: true, - OnHostRule: true, - CAServer: "CAServer", - EntryPoint: "EntryPoint", - DNSChallenge: &acmeprovider.DNSChallenge{Provider: "DNSProvider"}, - DelayDontCheckDNS: 666, - ACMELogging: true, + Storage: "Storage", + StorageFile: "StorageFile", + OnDemand: true, + OnHostRule: true, + CAServer: "CAServer", + EntryPoint: "EntryPoint", + DNSChallenge: &acmeprovider.DNSChallenge{Provider: "DNSProvider"}, + ACMELogging: true, TLSConfig: &tls.Config{ InsecureSkipVerify: true, // ... diff --git a/api/dashboard.go b/api/dashboard.go index a3232a166..059a9eaf2 100644 --- a/api/dashboard.go +++ b/api/dashboard.go @@ -13,10 +13,10 @@ type DashboardHandler struct { Assets *assetfs.AssetFS } -// AddRoutes add dashboard routes on a router -func (g DashboardHandler) AddRoutes(router *mux.Router) { +// Append add dashboard routes on a router +func (g DashboardHandler) Append(router *mux.Router) { if g.Assets == nil { - log.Error("No assets for dashboard") + log.WithoutContext().Error("No assets for dashboard") return } diff --git a/api/debug.go b/api/debug.go index 785a61988..35868f3c6 100644 --- a/api/debug.go +++ b/api/debug.go @@ -11,7 +11,8 @@ import ( ) func init() { - expvar.Publish("Goroutines", expvar.Func(goroutines)) + // FIXME Goroutines2 -> Goroutines + expvar.Publish("Goroutines2", expvar.Func(goroutines)) } func goroutines() interface{} { @@ -21,8 +22,8 @@ func goroutines() interface{} { // DebugHandler expose debug routes type DebugHandler struct{} -// AddRoutes add debug routes on a router -func (g DebugHandler) AddRoutes(router *mux.Router) { +// Append add debug routes on a router +func (g DebugHandler) Append(router *mux.Router) { router.Methods(http.MethodGet).Path("/debug/vars"). 
HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.Header().Set("Content-Type", "application/json; charset=utf-8") diff --git a/api/handler.go b/api/handler.go index 370955978..f43b1a2d2 100644 --- a/api/handler.go +++ b/api/handler.go @@ -1,252 +1,304 @@ package api import ( + "io" "net/http" "github.com/containous/mux" + "github.com/containous/traefik/config" "github.com/containous/traefik/log" - "github.com/containous/traefik/middlewares" "github.com/containous/traefik/safe" "github.com/containous/traefik/types" "github.com/containous/traefik/version" "github.com/elazarl/go-bindata-assetfs" - thoas_stats "github.com/thoas/stats" + thoasstats "github.com/thoas/stats" "github.com/unrolled/render" ) +// ResourceIdentifier a resource identifier +type ResourceIdentifier struct { + ID string `json:"id"` + Path string `json:"path"` +} + +// ProviderRepresentation a provider with resource identifiers +type ProviderRepresentation struct { + Routers []ResourceIdentifier `json:"routers,omitempty"` + Middlewares []ResourceIdentifier `json:"middlewares,omitempty"` + Services []ResourceIdentifier `json:"services,omitempty"` +} + +// RouterRepresentation extended version of a router configuration with an ID +type RouterRepresentation struct { + *config.Router + ID string `json:"id"` +} + +// MiddlewareRepresentation extended version of a middleware configuration with an ID +type MiddlewareRepresentation struct { + *config.Middleware + ID string `json:"id"` +} + +// ServiceRepresentation extended version of a service configuration with an ID +type ServiceRepresentation struct { + *config.Service + ID string `json:"id"` +} + // Handler expose api routes type Handler struct { - EntryPoint string `description:"EntryPoint" export:"true"` - Dashboard bool `description:"Activate dashboard" export:"true"` - Debug bool `export:"true"` + EntryPoint string + Dashboard bool + Debug bool CurrentConfigurations *safe.Safe - Statistics *types.Statistics `description:"Enable more detailed statistics" export:"true"` - Stats *thoas_stats.Stats `json:"-"` - StatsRecorder *middlewares.StatsRecorder `json:"-"` - DashboardAssets *assetfs.AssetFS `json:"-"` + Statistics *types.Statistics + Stats *thoasstats.Stats + // StatsRecorder *middlewares.StatsRecorder // FIXME stats + DashboardAssets *assetfs.AssetFS } -var ( - templatesRenderer = render.New(render.Options{ - Directory: "nowhere", - }) -) +var templateRenderer jsonRenderer = render.New(render.Options{Directory: "nowhere"}) -// AddRoutes add api routes on a router -func (p Handler) AddRoutes(router *mux.Router) { +type jsonRenderer interface { + JSON(w io.Writer, status int, v interface{}) error +} + +// Append add api routes on a router +func (p Handler) Append(router *mux.Router) { if p.Debug { - DebugHandler{}.AddRoutes(router) + DebugHandler{}.Append(router) } - router.Methods(http.MethodGet).Path("/api").HandlerFunc(p.getConfigHandler) - router.Methods(http.MethodGet).Path("/api/providers").HandlerFunc(p.getConfigHandler) + router.Methods(http.MethodGet).Path("/api/providers").HandlerFunc(p.getProvidersHandler) router.Methods(http.MethodGet).Path("/api/providers/{provider}").HandlerFunc(p.getProviderHandler) - router.Methods(http.MethodGet).Path("/api/providers/{provider}/backends").HandlerFunc(p.getBackendsHandler) - router.Methods(http.MethodGet).Path("/api/providers/{provider}/backends/{backend}").HandlerFunc(p.getBackendHandler) - 
router.Methods(http.MethodGet).Path("/api/providers/{provider}/backends/{backend}/servers").HandlerFunc(p.getServersHandler) - router.Methods(http.MethodGet).Path("/api/providers/{provider}/backends/{backend}/servers/{server}").HandlerFunc(p.getServerHandler) - router.Methods(http.MethodGet).Path("/api/providers/{provider}/frontends").HandlerFunc(p.getFrontendsHandler) - router.Methods(http.MethodGet).Path("/api/providers/{provider}/frontends/{frontend}").HandlerFunc(p.getFrontendHandler) - router.Methods(http.MethodGet).Path("/api/providers/{provider}/frontends/{frontend}/routes").HandlerFunc(p.getRoutesHandler) - router.Methods(http.MethodGet).Path("/api/providers/{provider}/frontends/{frontend}/routes/{route}").HandlerFunc(p.getRouteHandler) + router.Methods(http.MethodGet).Path("/api/providers/{provider}/routers").HandlerFunc(p.getRoutersHandler) + router.Methods(http.MethodGet).Path("/api/providers/{provider}/routers/{router}").HandlerFunc(p.getRouterHandler) + router.Methods(http.MethodGet).Path("/api/providers/{provider}/middlewares").HandlerFunc(p.getMiddlewaresHandler) + router.Methods(http.MethodGet).Path("/api/providers/{provider}/middlewares/{middleware}").HandlerFunc(p.getMiddlewareHandler) + router.Methods(http.MethodGet).Path("/api/providers/{provider}/services").HandlerFunc(p.getServicesHandler) + router.Methods(http.MethodGet).Path("/api/providers/{provider}/services/{service}").HandlerFunc(p.getServiceHandler) + // FIXME stats // health route - router.Methods(http.MethodGet).Path("/health").HandlerFunc(p.getHealthHandler) + //router.Methods(http.MethodGet).Path("/health").HandlerFunc(p.getHealthHandler) - version.Handler{}.AddRoutes(router) + version.Handler{}.Append(router) if p.Dashboard { - DashboardHandler{Assets: p.DashboardAssets}.AddRoutes(router) + DashboardHandler{Assets: p.DashboardAssets}.Append(router) } } -func getProviderIDFromVars(vars map[string]string) string { - providerID := vars["provider"] - // TODO: Deprecated - if providerID == "rest" { - providerID = "web" +func (p Handler) getProvidersHandler(rw http.ResponseWriter, request *http.Request) { + // FIXME handle currentConfiguration + if p.CurrentConfigurations != nil { + currentConfigurations, ok := p.CurrentConfigurations.Get().(config.Configurations) + if !ok { + rw.WriteHeader(http.StatusOK) + return + } + + var providers []ResourceIdentifier + for name := range currentConfigurations { + providers = append(providers, ResourceIdentifier{ + ID: name, + Path: "/api/providers/" + name, + }) + } + + err := templateRenderer.JSON(rw, http.StatusOK, providers) + if err != nil { + log.FromContext(request.Context()).Error(err) + http.Error(rw, err.Error(), http.StatusInternalServerError) + } } - return providerID } -func (p Handler) getConfigHandler(response http.ResponseWriter, request *http.Request) { - currentConfigurations := p.CurrentConfigurations.Get().(types.Configurations) - err := templatesRenderer.JSON(response, http.StatusOK, currentConfigurations) +func (p Handler) getProviderHandler(rw http.ResponseWriter, request *http.Request) { + providerID := mux.Vars(request)["provider"] + + currentConfigurations := p.CurrentConfigurations.Get().(config.Configurations) + + provider, ok := currentConfigurations[providerID] + if !ok { + http.NotFound(rw, request) + return + } + + var routers []ResourceIdentifier + for name := range provider.Routers { + routers = append(routers, ResourceIdentifier{ + ID: name, + Path: "/api/providers/" + providerID + "/routers", + }) + } + + var services []ResourceIdentifier 
+ for name := range provider.Services { + services = append(services, ResourceIdentifier{ + ID: name, + Path: "/api/providers/" + providerID + "/services", + }) + } + + var middlewares []ResourceIdentifier + for name := range provider.Middlewares { + middlewares = append(middlewares, ResourceIdentifier{ + ID: name, + Path: "/api/providers/" + providerID + "/middlewares", + }) + } + + providers := ProviderRepresentation{Routers: routers, Middlewares: middlewares, Services: services} + + err := templateRenderer.JSON(rw, http.StatusOK, providers) if err != nil { - log.Error(err) + log.FromContext(request.Context()).Error(err) + http.Error(rw, err.Error(), http.StatusInternalServerError) } } -func (p Handler) getProviderHandler(response http.ResponseWriter, request *http.Request) { - providerID := getProviderIDFromVars(mux.Vars(request)) +func (p Handler) getRoutersHandler(rw http.ResponseWriter, request *http.Request) { + providerID := mux.Vars(request)["provider"] - currentConfigurations := p.CurrentConfigurations.Get().(types.Configurations) - if provider, ok := currentConfigurations[providerID]; ok { - err := templatesRenderer.JSON(response, http.StatusOK, provider) - if err != nil { - log.Error(err) - } - } else { - http.NotFound(response, request) + currentConfigurations := p.CurrentConfigurations.Get().(config.Configurations) + + provider, ok := currentConfigurations[providerID] + if !ok { + http.NotFound(rw, request) + return } -} -func (p Handler) getBackendsHandler(response http.ResponseWriter, request *http.Request) { - providerID := getProviderIDFromVars(mux.Vars(request)) - - currentConfigurations := p.CurrentConfigurations.Get().(types.Configurations) - if provider, ok := currentConfigurations[providerID]; ok { - err := templatesRenderer.JSON(response, http.StatusOK, provider.Backends) - if err != nil { - log.Error(err) - } - } else { - http.NotFound(response, request) + var routers []RouterRepresentation + for name, router := range provider.Routers { + routers = append(routers, RouterRepresentation{Router: router, ID: name}) } -} -func (p Handler) getBackendHandler(response http.ResponseWriter, request *http.Request) { - vars := mux.Vars(request) - providerID := getProviderIDFromVars(vars) - backendID := vars["backend"] - - currentConfigurations := p.CurrentConfigurations.Get().(types.Configurations) - if provider, ok := currentConfigurations[providerID]; ok { - if backend, ok := provider.Backends[backendID]; ok { - err := templatesRenderer.JSON(response, http.StatusOK, backend) - if err != nil { - log.Error(err) - } - return - } - } - http.NotFound(response, request) -} - -func (p Handler) getServersHandler(response http.ResponseWriter, request *http.Request) { - vars := mux.Vars(request) - providerID := getProviderIDFromVars(vars) - backendID := vars["backend"] - - currentConfigurations := p.CurrentConfigurations.Get().(types.Configurations) - if provider, ok := currentConfigurations[providerID]; ok { - if backend, ok := provider.Backends[backendID]; ok { - err := templatesRenderer.JSON(response, http.StatusOK, backend.Servers) - if err != nil { - log.Error(err) - } - return - } - } - http.NotFound(response, request) -} - -func (p Handler) getServerHandler(response http.ResponseWriter, request *http.Request) { - vars := mux.Vars(request) - providerID := getProviderIDFromVars(vars) - backendID := vars["backend"] - serverID := vars["server"] - - currentConfigurations := p.CurrentConfigurations.Get().(types.Configurations) - if provider, ok := currentConfigurations[providerID]; 
ok { - if backend, ok := provider.Backends[backendID]; ok { - if server, ok := backend.Servers[serverID]; ok { - err := templatesRenderer.JSON(response, http.StatusOK, server) - if err != nil { - log.Error(err) - } - return - } - } - } - http.NotFound(response, request) -} - -func (p Handler) getFrontendsHandler(response http.ResponseWriter, request *http.Request) { - providerID := getProviderIDFromVars(mux.Vars(request)) - - currentConfigurations := p.CurrentConfigurations.Get().(types.Configurations) - if provider, ok := currentConfigurations[providerID]; ok { - err := templatesRenderer.JSON(response, http.StatusOK, provider.Frontends) - if err != nil { - log.Error(err) - } - } else { - http.NotFound(response, request) - } -} - -func (p Handler) getFrontendHandler(response http.ResponseWriter, request *http.Request) { - vars := mux.Vars(request) - providerID := getProviderIDFromVars(vars) - frontendID := vars["frontend"] - - currentConfigurations := p.CurrentConfigurations.Get().(types.Configurations) - if provider, ok := currentConfigurations[providerID]; ok { - if frontend, ok := provider.Frontends[frontendID]; ok { - err := templatesRenderer.JSON(response, http.StatusOK, frontend) - if err != nil { - log.Error(err) - } - return - } - } - http.NotFound(response, request) -} - -func (p Handler) getRoutesHandler(response http.ResponseWriter, request *http.Request) { - vars := mux.Vars(request) - providerID := getProviderIDFromVars(vars) - frontendID := vars["frontend"] - - currentConfigurations := p.CurrentConfigurations.Get().(types.Configurations) - if provider, ok := currentConfigurations[providerID]; ok { - if frontend, ok := provider.Frontends[frontendID]; ok { - err := templatesRenderer.JSON(response, http.StatusOK, frontend.Routes) - if err != nil { - log.Error(err) - } - return - } - } - http.NotFound(response, request) -} - -func (p Handler) getRouteHandler(response http.ResponseWriter, request *http.Request) { - vars := mux.Vars(request) - providerID := getProviderIDFromVars(vars) - frontendID := vars["frontend"] - routeID := vars["route"] - - currentConfigurations := p.CurrentConfigurations.Get().(types.Configurations) - if provider, ok := currentConfigurations[providerID]; ok { - if frontend, ok := provider.Frontends[frontendID]; ok { - if route, ok := frontend.Routes[routeID]; ok { - err := templatesRenderer.JSON(response, http.StatusOK, route) - if err != nil { - log.Error(err) - } - return - } - } - } - http.NotFound(response, request) -} - -// healthResponse combines data returned by thoas/stats with statistics (if -// they are enabled). 
-type healthResponse struct { - *thoas_stats.Data - *middlewares.Stats -} - -func (p *Handler) getHealthHandler(response http.ResponseWriter, request *http.Request) { - health := &healthResponse{Data: p.Stats.Data()} - if p.StatsRecorder != nil { - health.Stats = p.StatsRecorder.Data() - } - err := templatesRenderer.JSON(response, http.StatusOK, health) + err := templateRenderer.JSON(rw, http.StatusOK, routers) if err != nil { - log.Error(err) + log.FromContext(request.Context()).Error(err) + http.Error(rw, err.Error(), http.StatusInternalServerError) + } +} + +func (p Handler) getRouterHandler(rw http.ResponseWriter, request *http.Request) { + providerID := mux.Vars(request)["provider"] + routerID := mux.Vars(request)["router"] + + currentConfigurations := p.CurrentConfigurations.Get().(config.Configurations) + + provider, ok := currentConfigurations[providerID] + if !ok { + http.NotFound(rw, request) + return + } + + router, ok := provider.Routers[routerID] + if !ok { + http.NotFound(rw, request) + return + } + + err := templateRenderer.JSON(rw, http.StatusOK, router) + if err != nil { + log.FromContext(request.Context()).Error(err) + http.Error(rw, err.Error(), http.StatusInternalServerError) + } +} + +func (p Handler) getMiddlewaresHandler(rw http.ResponseWriter, request *http.Request) { + providerID := mux.Vars(request)["provider"] + + currentConfigurations := p.CurrentConfigurations.Get().(config.Configurations) + + provider, ok := currentConfigurations[providerID] + if !ok { + http.NotFound(rw, request) + return + } + + var middlewares []MiddlewareRepresentation + for name, middleware := range provider.Middlewares { + middlewares = append(middlewares, MiddlewareRepresentation{Middleware: middleware, ID: name}) + } + + err := templateRenderer.JSON(rw, http.StatusOK, middlewares) + if err != nil { + log.FromContext(request.Context()).Error(err) + http.Error(rw, err.Error(), http.StatusInternalServerError) + } +} + +func (p Handler) getMiddlewareHandler(rw http.ResponseWriter, request *http.Request) { + providerID := mux.Vars(request)["provider"] + middlewareID := mux.Vars(request)["middleware"] + + currentConfigurations := p.CurrentConfigurations.Get().(config.Configurations) + + provider, ok := currentConfigurations[providerID] + if !ok { + http.NotFound(rw, request) + return + } + + middleware, ok := provider.Middlewares[middlewareID] + if !ok { + http.NotFound(rw, request) + return + } + + err := templateRenderer.JSON(rw, http.StatusOK, middleware) + if err != nil { + log.FromContext(request.Context()).Error(err) + http.Error(rw, err.Error(), http.StatusInternalServerError) + } +} + +func (p Handler) getServicesHandler(rw http.ResponseWriter, request *http.Request) { + providerID := mux.Vars(request)["provider"] + + currentConfigurations := p.CurrentConfigurations.Get().(config.Configurations) + + provider, ok := currentConfigurations[providerID] + if !ok { + http.NotFound(rw, request) + return + } + + var services []ServiceRepresentation + for name, service := range provider.Services { + services = append(services, ServiceRepresentation{Service: service, ID: name}) + } + + err := templateRenderer.JSON(rw, http.StatusOK, services) + if err != nil { + log.FromContext(request.Context()).Error(err) + http.Error(rw, err.Error(), http.StatusInternalServerError) + } +} + +func (p Handler) getServiceHandler(rw http.ResponseWriter, request *http.Request) { + providerID := mux.Vars(request)["provider"] + serviceID := mux.Vars(request)["service"] + + currentConfigurations := 
p.CurrentConfigurations.Get().(config.Configurations) + + provider, ok := currentConfigurations[providerID] + if !ok { + http.NotFound(rw, request) + return + } + + service, ok := provider.Services[serviceID] + if !ok { + http.NotFound(rw, request) + return + } + + err := templateRenderer.JSON(rw, http.StatusOK, service) + if err != nil { + log.FromContext(request.Context()).Error(err) + http.Error(rw, err.Error(), http.StatusInternalServerError) } } diff --git a/api/handler_test.go b/api/handler_test.go new file mode 100644 index 000000000..bb60f1632 --- /dev/null +++ b/api/handler_test.go @@ -0,0 +1,210 @@ +package api + +import ( + "io/ioutil" + "net/http" + "net/http/httptest" + "testing" + + "github.com/containous/mux" + "github.com/containous/traefik/config" + "github.com/containous/traefik/safe" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestHandler_Configuration(t *testing.T) { + type expected struct { + statusCode int + body string + } + + testCases := []struct { + desc string + path string + configuration config.Configurations + expected expected + }{ + { + desc: "Get all the providers", + path: "/api/providers", + configuration: config.Configurations{ + "foo": { + Routers: map[string]*config.Router{ + "bar": {EntryPoints: []string{"foo", "bar"}}, + }, + }, + }, + expected: expected{statusCode: http.StatusOK, body: `[{"id":"foo","path":"/api/providers/foo"}]`}, + }, + { + desc: "Get a provider", + path: "/api/providers/foo", + configuration: config.Configurations{ + "foo": { + Routers: map[string]*config.Router{ + "bar": {EntryPoints: []string{"foo", "bar"}}, + }, + Middlewares: map[string]*config.Middleware{ + "bar": { + AddPrefix: &config.AddPrefix{Prefix: "bar"}, + }, + }, + Services: map[string]*config.Service{ + "foo": { + LoadBalancer: &config.LoadBalancerService{ + Method: "wrr", + }, + }, + }, + }, + }, + expected: expected{statusCode: http.StatusOK, body: `{"routers":[{"id":"bar","path":"/api/providers/foo/routers"}],"middlewares":[{"id":"bar","path":"/api/providers/foo/middlewares"}],"services":[{"id":"foo","path":"/api/providers/foo/services"}]}`}, + }, + { + desc: "Provider not found", + path: "/api/providers/foo", + configuration: config.Configurations{}, + expected: expected{statusCode: http.StatusNotFound, body: "404 page not found\n"}, + }, + { + desc: "Get all routers", + path: "/api/providers/foo/routers", + configuration: config.Configurations{ + "foo": { + Routers: map[string]*config.Router{ + "bar": {EntryPoints: []string{"foo", "bar"}}, + }, + }, + }, + expected: expected{statusCode: http.StatusOK, body: `[{"entryPoints":["foo","bar"],"id":"bar"}]`}, + }, + { + desc: "Get a router", + path: "/api/providers/foo/routers/bar", + configuration: config.Configurations{ + "foo": { + Routers: map[string]*config.Router{ + "bar": {EntryPoints: []string{"foo", "bar"}}, + }, + }, + }, + expected: expected{statusCode: http.StatusOK, body: `{"entryPoints":["foo","bar"]}`}, + }, + { + desc: "Router not found", + path: "/api/providers/foo/routers/bar", + configuration: config.Configurations{ + "foo": {}, + }, + expected: expected{statusCode: http.StatusNotFound, body: "404 page not found\n"}, + }, + { + desc: "Get all services", + path: "/api/providers/foo/services", + configuration: config.Configurations{ + "foo": { + Services: map[string]*config.Service{ + "foo": { + LoadBalancer: &config.LoadBalancerService{ + Method: "wrr", + }, + }, + }, + }, + }, + expected: expected{statusCode: http.StatusOK, body: 
`[{"loadbalancer":{"method":"wrr","passHostHeader":false},"id":"foo"}]`}, + }, + { + desc: "Get a service", + path: "/api/providers/foo/services/foo", + configuration: config.Configurations{ + "foo": { + Services: map[string]*config.Service{ + "foo": { + LoadBalancer: &config.LoadBalancerService{ + Method: "wrr", + }, + }, + }, + }, + }, + expected: expected{statusCode: http.StatusOK, body: `{"loadbalancer":{"method":"wrr","passHostHeader":false}}`}, + }, + { + desc: "Service not found", + path: "/api/providers/foo/services/bar", + configuration: config.Configurations{ + "foo": {}, + }, + expected: expected{statusCode: http.StatusNotFound, body: "404 page not found\n"}, + }, + { + desc: "Get all middlewares", + path: "/api/providers/foo/middlewares", + configuration: config.Configurations{ + "foo": { + Middlewares: map[string]*config.Middleware{ + "bar": { + AddPrefix: &config.AddPrefix{Prefix: "bar"}, + }, + }, + }, + }, + expected: expected{statusCode: http.StatusOK, body: `[{"addPrefix":{"prefix":"bar"},"id":"bar"}]`}, + }, + { + desc: "Get a middleware", + path: "/api/providers/foo/middlewares/bar", + configuration: config.Configurations{ + "foo": { + Middlewares: map[string]*config.Middleware{ + "bar": { + AddPrefix: &config.AddPrefix{Prefix: "bar"}, + }, + }, + }, + }, + expected: expected{statusCode: http.StatusOK, body: `{"addPrefix":{"prefix":"bar"}}`}, + }, + { + desc: "Middleware not found", + path: "/api/providers/foo/middlewares/bar", + configuration: config.Configurations{ + "foo": {}, + }, + expected: expected{statusCode: http.StatusNotFound, body: "404 page not found\n"}, + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + currentConfiguration := &safe.Safe{} + currentConfiguration.Set(test.configuration) + + handler := Handler{ + CurrentConfigurations: currentConfiguration, + } + + router := mux.NewRouter() + handler.Append(router) + + server := httptest.NewServer(router) + + resp, err := http.DefaultClient.Get(server.URL + test.path) + require.NoError(t, err) + + assert.Equal(t, test.expected.statusCode, resp.StatusCode) + + content, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + err = resp.Body.Close() + require.NoError(t, err) + + assert.Equal(t, test.expected.body, string(content)) + }) + } +} diff --git a/cluster/leadership.go b/cluster/leadership.go index 30ce76bac..7276cca03 100644 --- a/cluster/leadership.go +++ b/cluster/leadership.go @@ -8,8 +8,8 @@ import ( "github.com/cenk/backoff" "github.com/containous/mux" "github.com/containous/traefik/log" + "github.com/containous/traefik/old/types" "github.com/containous/traefik/safe" - "github.com/containous/traefik/types" "github.com/docker/leadership" "github.com/unrolled/render" ) diff --git a/cmd/bug/bug_test.go b/cmd/bug/bug_test.go index 2d78a93b6..e21136bcb 100644 --- a/cmd/bug/bug_test.go +++ b/cmd/bug/bug_test.go @@ -5,10 +5,10 @@ import ( "github.com/containous/traefik/anonymize" "github.com/containous/traefik/cmd" - "github.com/containous/traefik/configuration" - "github.com/containous/traefik/provider/file" + "github.com/containous/traefik/old/configuration" + "github.com/containous/traefik/old/provider/file" + "github.com/containous/traefik/old/types" "github.com/containous/traefik/tls" - "github.com/containous/traefik/types" "github.com/stretchr/testify/assert" ) diff --git a/cmd/configuration.go b/cmd/configuration.go index d5bac0aa7..f9a9d95af 100644 --- a/cmd/configuration.go +++ b/cmd/configuration.go @@ -4,32 +4,30 @@ 
import ( "time" "github.com/containous/flaeg/parse" - "github.com/containous/traefik-extra-service-fabric" - "github.com/containous/traefik/api" - "github.com/containous/traefik/configuration" - "github.com/containous/traefik/middlewares/accesslog" - "github.com/containous/traefik/middlewares/tracing" - "github.com/containous/traefik/middlewares/tracing/datadog" - "github.com/containous/traefik/middlewares/tracing/jaeger" - "github.com/containous/traefik/middlewares/tracing/zipkin" - "github.com/containous/traefik/ping" - "github.com/containous/traefik/provider/boltdb" - "github.com/containous/traefik/provider/consul" - "github.com/containous/traefik/provider/consulcatalog" - "github.com/containous/traefik/provider/docker" - "github.com/containous/traefik/provider/dynamodb" - "github.com/containous/traefik/provider/ecs" - "github.com/containous/traefik/provider/etcd" - "github.com/containous/traefik/provider/eureka" - "github.com/containous/traefik/provider/file" - "github.com/containous/traefik/provider/kubernetes" - "github.com/containous/traefik/provider/marathon" - "github.com/containous/traefik/provider/mesos" - "github.com/containous/traefik/provider/rancher" - "github.com/containous/traefik/provider/rest" - "github.com/containous/traefik/provider/zk" - "github.com/containous/traefik/types" - sf "github.com/jjcollinge/servicefabric" + "github.com/containous/traefik/old/api" + "github.com/containous/traefik/old/configuration" + "github.com/containous/traefik/old/middlewares/accesslog" + "github.com/containous/traefik/old/middlewares/tracing" + "github.com/containous/traefik/old/middlewares/tracing/datadog" + "github.com/containous/traefik/old/middlewares/tracing/jaeger" + "github.com/containous/traefik/old/middlewares/tracing/zipkin" + "github.com/containous/traefik/old/ping" + "github.com/containous/traefik/old/provider/boltdb" + "github.com/containous/traefik/old/provider/consul" + "github.com/containous/traefik/old/provider/consulcatalog" + "github.com/containous/traefik/old/provider/docker" + "github.com/containous/traefik/old/provider/dynamodb" + "github.com/containous/traefik/old/provider/ecs" + "github.com/containous/traefik/old/provider/etcd" + "github.com/containous/traefik/old/provider/eureka" + "github.com/containous/traefik/old/provider/file" + "github.com/containous/traefik/old/provider/kubernetes" + "github.com/containous/traefik/old/provider/marathon" + "github.com/containous/traefik/old/provider/mesos" + "github.com/containous/traefik/old/provider/rancher" + "github.com/containous/traefik/old/provider/rest" + "github.com/containous/traefik/old/provider/zk" + "github.com/containous/traefik/old/types" ) // TraefikConfiguration holds GlobalConfiguration and other stuff @@ -145,11 +143,6 @@ func NewTraefikDefaultPointersConfiguration() *TraefikConfiguration { var defaultEureka eureka.Provider defaultEureka.RefreshSeconds = parse.Duration(30 * time.Second) - // default ServiceFabric - var defaultServiceFabric servicefabric.Provider - defaultServiceFabric.APIVersion = sf.DefaultAPIVersion - defaultServiceFabric.RefreshSeconds = 10 - // default Ping var defaultPing = ping.Handler{ EntryPoint: "traefik", diff --git a/cmd/convert/convert.go b/cmd/convert/convert.go new file mode 100644 index 000000000..21641bb3c --- /dev/null +++ b/cmd/convert/convert.go @@ -0,0 +1,146 @@ +package main + +import ( + "os" + "strings" + + "github.com/BurntSushi/toml" + "github.com/containous/traefik/config" + "github.com/containous/traefik/old/log" + "github.com/containous/traefik/old/types" + 
"github.com/sirupsen/logrus" +) + +var oldvalue = ` +[backends] + [backends.backend1] + [backends.backend1.servers.server1] + url = "http://127.0.0.1:9010" + weight = 1 + [backends.backend2] + [backends.backend2.servers.server1] + url = "http://127.0.0.1:9020" + weight = 1 + +[frontends] + [frontends.frontend1] + backend = "backend1" + [frontends.frontend1.routes.test_1] + rule = "Host:snitest.com" + [frontends.frontend2] + backend = "backend2" + [frontends.frontend2.routes.test_2] + rule = "Host:snitest.org" + +` + +// Temporary utility to convert dynamic conf v1 to v2 +func main() { + log.SetOutput(os.Stdout) + log.SetLevel(logrus.DebugLevel) + + oldconfig := &types.Configuration{} + toml.Decode(oldvalue, oldconfig) + + newconfig := config.Configuration{ + Routers: make(map[string]*config.Router), + Middlewares: make(map[string]*config.Middleware), + Services: make(map[string]*config.Service), + } + + for frontendName, frontend := range oldconfig.Frontends { + newconfig.Routers[replaceFrontend(frontendName)] = convertFrontend(frontend) + if frontend.PassHostHeader { + log.Warn("ignore PassHostHeader") + } + } + + for backendName, backend := range oldconfig.Backends { + newconfig.Services[replaceBackend(backendName)] = convertBackend(backend) + } + + encoder := toml.NewEncoder(os.Stdout) + encoder.Encode(newconfig) +} + +func replaceBackend(name string) string { + return strings.Replace(name, "backend", "service", -1) +} + +func replaceFrontend(name string) string { + return strings.Replace(name, "frontend", "router", -1) +} + +func convertFrontend(frontend *types.Frontend) *config.Router { + router := &config.Router{ + EntryPoints: frontend.EntryPoints, + Middlewares: nil, + Service: replaceBackend(frontend.Backend), + Priority: frontend.Priority, + } + + if len(frontend.Routes) > 1 { + log.Fatal("Multiple routes") + } + + for _, route := range frontend.Routes { + router.Rule = route.Rule + } + + return router +} + +func convertBackend(backend *types.Backend) *config.Service { + service := &config.Service{ + LoadBalancer: &config.LoadBalancerService{ + Stickiness: nil, + Servers: nil, + Method: "", + HealthCheck: nil, + PassHostHeader: false, + }, + } + + if backend.Buffering != nil { + log.Warn("Buffering not implemented") + } + + if backend.CircuitBreaker != nil { + log.Warn("CircuitBreaker not implemented") + } + + if backend.MaxConn != nil { + log.Warn("MaxConn not implemented") + } + + for _, oldserver := range backend.Servers { + service.LoadBalancer.Servers = append(service.LoadBalancer.Servers, config.Server{ + URL: oldserver.URL, + Weight: oldserver.Weight, + }) + } + + if backend.LoadBalancer != nil { + service.LoadBalancer.Method = backend.LoadBalancer.Method + if backend.LoadBalancer.Stickiness != nil { + service.LoadBalancer.Stickiness = &config.Stickiness{ + CookieName: backend.LoadBalancer.Stickiness.CookieName, + } + } + + if backend.HealthCheck != nil { + service.LoadBalancer.HealthCheck = &config.HealthCheck{ + Scheme: backend.HealthCheck.Scheme, + Path: backend.HealthCheck.Path, + Port: backend.HealthCheck.Port, + Interval: backend.HealthCheck.Interval, + Timeout: backend.HealthCheck.Timeout, + Hostname: backend.HealthCheck.Hostname, + Headers: backend.HealthCheck.Headers, + } + } + + } + + return service +} diff --git a/cmd/healthcheck/healthcheck.go b/cmd/healthcheck/healthcheck.go index 1e31ec679..8822c76a3 100644 --- a/cmd/healthcheck/healthcheck.go +++ b/cmd/healthcheck/healthcheck.go @@ -10,7 +10,7 @@ import ( "github.com/containous/flaeg" 
"github.com/containous/traefik/cmd" - "github.com/containous/traefik/configuration" + "github.com/containous/traefik/old/configuration" ) // NewCmd builds a new HealthCheck command diff --git a/cmd/traefik/traefik.go b/cmd/traefik/traefik.go index 1f0210b65..808b795f9 100644 --- a/cmd/traefik/traefik.go +++ b/cmd/traefik/traefik.go @@ -21,17 +21,20 @@ import ( "github.com/containous/traefik/cmd/storeconfig" cmdVersion "github.com/containous/traefik/cmd/version" "github.com/containous/traefik/collector" - "github.com/containous/traefik/configuration" - "github.com/containous/traefik/configuration/router" + "github.com/containous/traefik/config" + "github.com/containous/traefik/config/static" "github.com/containous/traefik/job" "github.com/containous/traefik/log" - "github.com/containous/traefik/provider/ecs" - "github.com/containous/traefik/provider/kubernetes" + "github.com/containous/traefik/old/configuration" + "github.com/containous/traefik/old/provider/ecs" + "github.com/containous/traefik/old/provider/kubernetes" + "github.com/containous/traefik/old/types" + "github.com/containous/traefik/provider/aggregator" "github.com/containous/traefik/safe" "github.com/containous/traefik/server" + "github.com/containous/traefik/server/router" "github.com/containous/traefik/server/uuid" traefiktls "github.com/containous/traefik/tls" - "github.com/containous/traefik/types" "github.com/containous/traefik/version" "github.com/coreos/go-systemd/daemon" "github.com/elazarl/go-bindata-assetfs" @@ -186,7 +189,7 @@ func runCmd(globalConfiguration *configuration.GlobalConfiguration, configFile s stats(globalConfiguration) - providerAggregator := configuration.NewProviderAggregator(globalConfiguration) + providerAggregator := aggregator.NewProviderAggregator(static.ConvertStaticConf(*globalConfiguration)) acmeProvider, err := globalConfiguration.InitACMEProvider() if err != nil { @@ -199,18 +202,15 @@ func runCmd(globalConfiguration *configuration.GlobalConfiguration, configFile s } entryPoints := map[string]server.EntryPoint{} + staticConf := static.ConvertStaticConf(*globalConfiguration) for entryPointName, config := range globalConfiguration.EntryPoints { - + factory := router.NewRouteAppenderFactory(staticConf, entryPointName, acmeProvider) entryPoint := server.EntryPoint{ - Configuration: config, + RouteAppenderFactory: factory, + Configuration: config, } - internalRouter := router.NewInternalRouterAggregator(*globalConfiguration, entryPointName) if acmeProvider != nil { - if acmeProvider.HTTPChallenge != nil && entryPointName == acmeProvider.HTTPChallenge.EntryPoint { - internalRouter.AddRouter(acmeProvider) - } - // TLS ALPN 01 if acmeProvider.TLSChallenge != nil && acmeProvider.HTTPChallenge == nil && acmeProvider.DNSChallenge == nil { entryPoint.TLSALPNGetter = acmeProvider.GetTLSALPNCertificate @@ -227,19 +227,19 @@ func runCmd(globalConfiguration *configuration.GlobalConfiguration, configFile s } } - entryPoint.InternalRouter = internalRouter entryPoints[entryPointName] = entryPoint } svr := server.NewServer(*globalConfiguration, providerAggregator, entryPoints) + if acmeProvider != nil && acmeProvider.OnHostRule { - acmeProvider.SetConfigListenerChan(make(chan types.Configuration)) + acmeProvider.SetConfigListenerChan(make(chan config.Configuration)) svr.AddListener(acmeProvider.ListenConfiguration) } ctx := cmd.ContextWithSignal(context.Background()) - if globalConfiguration.Ping != nil { - globalConfiguration.Ping.WithContext(ctx) + if staticConf.Ping != nil { + 
staticConf.Ping.WithContext(ctx) } svr.StartWithContext(ctx) diff --git a/collector/collector.go b/collector/collector.go index de492a376..5a48112dd 100644 --- a/collector/collector.go +++ b/collector/collector.go @@ -10,8 +10,8 @@ import ( "time" "github.com/containous/traefik/anonymize" - "github.com/containous/traefik/configuration" "github.com/containous/traefik/log" + "github.com/containous/traefik/old/configuration" "github.com/containous/traefik/version" "github.com/mitchellh/hashstructure" ) diff --git a/config/dyn_config.go b/config/dyn_config.go new file mode 100644 index 000000000..1a7ed5d50 --- /dev/null +++ b/config/dyn_config.go @@ -0,0 +1,160 @@ +package config + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "os" + + traefiktls "github.com/containous/traefik/tls" +) + +// Router holds the router configuration. +type Router struct { + EntryPoints []string `json:"entryPoints"` + Middlewares []string `json:"middlewares,omitempty" toml:",omitempty"` + Service string `json:"service,omitempty" toml:",omitempty"` + Rule string `json:"rule,omitempty" toml:",omitempty"` + Priority int `json:"priority,omitempty" toml:"priority,omitzero"` +} + +// LoadBalancerService holds the LoadBalancerService configuration. +type LoadBalancerService struct { + Stickiness *Stickiness `json:"stickiness,omitempty" toml:",omitempty"` + Servers []Server `json:"servers,omitempty" toml:",omitempty"` + Method string `json:"method,omitempty" toml:",omitempty"` + HealthCheck *HealthCheck `json:"healthCheck,omitempty" toml:",omitempty"` + PassHostHeader bool `json:"passHostHeader" toml:",omitempty"` + ResponseForwarding *ResponseForwarding `json:"forwardingResponse,omitempty" toml:",omitempty"` +} + +// ResponseForwarding holds configuration for the forward of the response. +type ResponseForwarding struct { + FlushInterval string `json:"flushInterval,omitempty" toml:",omitempty"` +} + +// Stickiness holds the stickiness configuration. +type Stickiness struct { + CookieName string `json:"cookieName,omitempty" toml:",omitempty"` +} + +// Server holds the server configuration. +type Server struct { + URL string `json:"url"` + Weight int `json:"weight"` +} + +// HealthCheck holds the HealthCheck configuration. +type HealthCheck struct { + Scheme string `json:"scheme,omitempty" toml:",omitempty"` + Path string `json:"path,omitempty" toml:",omitempty"` + Port int `json:"port,omitempty" toml:",omitempty,omitzero"` + // FIXME change string to parse.Duration + Interval string `json:"interval,omitempty" toml:",omitempty"` + // FIXME change string to parse.Duration + Timeout string `json:"timeout,omitempty" toml:",omitempty"` + Hostname string `json:"hostname,omitempty" toml:",omitempty"` + Headers map[string]string `json:"headers,omitempty" toml:",omitempty"` +} + +// ClientTLS holds the TLS specific configurations as client +// CA, Cert and Key can be either path or file contents. +type ClientTLS struct { + CA string `description:"TLS CA" json:"ca,omitempty"` + CAOptional bool `description:"TLS CA.Optional" json:"caOptional,omitempty"` + Cert string `description:"TLS cert" json:"cert,omitempty"` + Key string `description:"TLS key" json:"key,omitempty"` + InsecureSkipVerify bool `description:"TLS insecure skip verify" json:"insecureSkipVerify,omitempty"` +} + +// CreateTLSConfig creates a TLS config from ClientTLS structures. 
+func (clientTLS *ClientTLS) CreateTLSConfig() (*tls.Config, error) { + if clientTLS == nil { + return nil, nil + } + + var err error + caPool := x509.NewCertPool() + clientAuth := tls.NoClientCert + if clientTLS.CA != "" { + var ca []byte + if _, errCA := os.Stat(clientTLS.CA); errCA == nil { + ca, err = ioutil.ReadFile(clientTLS.CA) + if err != nil { + return nil, fmt.Errorf("failed to read CA. %s", err) + } + } else { + ca = []byte(clientTLS.CA) + } + + if !caPool.AppendCertsFromPEM(ca) { + return nil, fmt.Errorf("failed to parse CA") + } + + if clientTLS.CAOptional { + clientAuth = tls.VerifyClientCertIfGiven + } else { + clientAuth = tls.RequireAndVerifyClientCert + } + } + + cert := tls.Certificate{} + _, errKeyIsFile := os.Stat(clientTLS.Key) + + if !clientTLS.InsecureSkipVerify && (len(clientTLS.Cert) == 0 || len(clientTLS.Key) == 0) { + return nil, fmt.Errorf("TLS Certificate or Key file must be set when TLS configuration is created") + } + + if len(clientTLS.Cert) > 0 && len(clientTLS.Key) > 0 { + if _, errCertIsFile := os.Stat(clientTLS.Cert); errCertIsFile == nil { + if errKeyIsFile == nil { + cert, err = tls.LoadX509KeyPair(clientTLS.Cert, clientTLS.Key) + if err != nil { + return nil, fmt.Errorf("failed to load TLS keypair: %v", err) + } + } else { + return nil, fmt.Errorf("tls cert is a file, but tls key is not") + } + } else { + if errKeyIsFile != nil { + cert, err = tls.X509KeyPair([]byte(clientTLS.Cert), []byte(clientTLS.Key)) + if err != nil { + return nil, fmt.Errorf("failed to load TLS keypair: %v", err) + + } + } else { + return nil, fmt.Errorf("TLS key is a file, but tls cert is not") + } + } + } + + return &tls.Config{ + Certificates: []tls.Certificate{cert}, + RootCAs: caPool, + InsecureSkipVerify: clientTLS.InsecureSkipVerify, + ClientAuth: clientAuth, + }, nil +} + +// Message holds configuration information exchanged between parts of traefik. +type Message struct { + ProviderName string + Configuration *Configuration +} + +// Configurations is for currentConfigurations Map. +type Configurations map[string]*Configuration + +// Configuration FIXME better name? +type Configuration struct { + Routers map[string]*Router `json:"routers,omitempty" toml:",omitempty"` + Middlewares map[string]*Middleware `json:"middlewares,omitempty" toml:",omitempty"` + Services map[string]*Service `json:"services,omitempty" toml:",omitempty"` + TLS []*traefiktls.Configuration `json:"-"` +} + +// Service holds a service configuration (can only be of one type at the same time). +type Service struct { + LoadBalancer *LoadBalancerService `json:"loadbalancer,omitempty" toml:",omitempty,omitzero"` +} diff --git a/config/middlewares.go b/config/middlewares.go new file mode 100644 index 000000000..cc0984bef --- /dev/null +++ b/config/middlewares.go @@ -0,0 +1,274 @@ +package config + +import ( + "github.com/containous/flaeg/parse" + "github.com/containous/traefik/ip" +) + +// Middleware holds the Middleware configuration. 
+type Middleware struct { + AddPrefix *AddPrefix `json:"addPrefix,omitempty"` + StripPrefix *StripPrefix `json:"stripPrefix,omitempty"` + StripPrefixRegex *StripPrefixRegex `json:"stripPrefixRegex,omitempty"` + ReplacePath *ReplacePath `json:"replacePath,omitempty"` + ReplacePathRegex *ReplacePathRegex `json:"replacePathRegex,omitempty"` + Chain *Chain `json:"chain,omitempty"` + IPWhiteList *IPWhiteList `json:"ipWhiteList,omitempty"` + Headers *Headers `json:"headers,omitempty"` + Errors *ErrorPage `json:"errors,omitempty"` + RateLimit *RateLimit `json:"rateLimit,omitempty"` + Redirect *Redirect `json:"redirect,omitempty"` + BasicAuth *BasicAuth `json:"basicAuth,omitempty"` + DigestAuth *DigestAuth `json:"digestAuth,omitempty"` + ForwardAuth *ForwardAuth `json:"forwardAuth,omitempty"` + MaxConn *MaxConn `json:"maxConn,omitempty"` + Buffering *Buffering `json:"buffering,omitempty"` + CircuitBreaker *CircuitBreaker `json:"circuitBreaker,omitempty"` + Compress *Compress `json:"compress,omitempty"` + PassTLSClientCert *PassTLSClientCert `json:"passTLSClientCert,omitempty"` + Retry *Retry `json:"retry,omitempty"` +} + +// AddPrefix holds the AddPrefix configuration. +type AddPrefix struct { + Prefix string `json:"prefix,omitempty"` +} + +// Auth holds the authentication configuration (BASIC, DIGEST, users). +type Auth struct { + Basic *BasicAuth `json:"basic,omitempty" export:"true"` + Digest *DigestAuth `json:"digest,omitempty" export:"true"` + Forward *ForwardAuth `json:"forward,omitempty" export:"true"` +} + +// BasicAuth holds the HTTP basic authentication configuration. +type BasicAuth struct { + Users `json:"users,omitempty" mapstructure:","` + UsersFile string `json:"usersFile,omitempty"` + Realm string `json:"realm,omitempty"` + RemoveHeader bool `json:"removeHeader,omitempty"` + HeaderField string `json:"headerField,omitempty" export:"true"` +} + +// Buffering holds the request/response buffering configuration. +type Buffering struct { + MaxRequestBodyBytes int64 `json:"maxRequestBodyBytes,omitempty"` + MemRequestBodyBytes int64 `json:"memRequestBodyBytes,omitempty"` + MaxResponseBodyBytes int64 `json:"maxResponseBodyBytes,omitempty"` + MemResponseBodyBytes int64 `json:"memResponseBodyBytes,omitempty"` + RetryExpression string `json:"retryExpression,omitempty"` +} + +// Chain holds a chain of middlewares +type Chain struct { + Middlewares []string `json:"middlewares"` +} + +// CircuitBreaker holds the circuit breaker configuration. +type CircuitBreaker struct { + Expression string `json:"expression,omitempty"` +} + +// Compress holds the compress configuration. +type Compress struct{} + +// DigestAuth holds the Digest HTTP authentication configuration. +type DigestAuth struct { + Users `json:"users,omitempty" mapstructure:","` + UsersFile string `json:"usersFile,omitempty"` + RemoveHeader bool `json:"removeHeader,omitempty"` + Realm string `json:"realm,omitempty" mapstructure:","` + HeaderField string `json:"headerField,omitempty" export:"true"` +} + +// ErrorPage holds the custom error page configuration. +type ErrorPage struct { + Status []string `json:"status,omitempty"` + Service string `json:"service,omitempty"` + Query string `json:"query,omitempty"` +} + +// ForwardAuth holds the http forward authentication configuration. 
+type ForwardAuth struct { + Address string `description:"Authentication server address" json:"address,omitempty"` + TLS *ClientTLS `description:"Enable TLS support" json:"tls,omitempty" export:"true"` + TrustForwardHeader bool `description:"Trust X-Forwarded-* headers" json:"trustForwardHeader,omitempty" export:"true"` + AuthResponseHeaders []string `description:"Headers to be forwarded from auth response" json:"authResponseHeaders,omitempty"` +} + +// Headers holds the custom header configuration. +type Headers struct { + CustomRequestHeaders map[string]string `json:"customRequestHeaders,omitempty"` + CustomResponseHeaders map[string]string `json:"customResponseHeaders,omitempty"` + + AllowedHosts []string `json:"allowedHosts,omitempty"` + HostsProxyHeaders []string `json:"hostsProxyHeaders,omitempty"` + SSLRedirect bool `json:"sslRedirect,omitempty"` + SSLTemporaryRedirect bool `json:"sslTemporaryRedirect,omitempty"` + SSLHost string `json:"sslHost,omitempty"` + SSLProxyHeaders map[string]string `json:"sslProxyHeaders,omitempty"` + SSLForceHost bool `json:"sslForceHost,omitempty"` + STSSeconds int64 `json:"stsSeconds,omitempty"` + STSIncludeSubdomains bool `json:"stsIncludeSubdomains,omitempty"` + STSPreload bool `json:"stsPreload,omitempty"` + ForceSTSHeader bool `json:"forceSTSHeader,omitempty"` + FrameDeny bool `json:"frameDeny,omitempty"` + CustomFrameOptionsValue string `json:"customFrameOptionsValue,omitempty"` + ContentTypeNosniff bool `json:"contentTypeNosniff,omitempty"` + BrowserXSSFilter bool `json:"browserXssFilter,omitempty"` + CustomBrowserXSSValue string `json:"customBrowserXSSValue,omitempty"` + ContentSecurityPolicy string `json:"contentSecurityPolicy,omitempty"` + PublicKey string `json:"publicKey,omitempty"` + ReferrerPolicy string `json:"referrerPolicy,omitempty"` + IsDevelopment bool `json:"isDevelopment,omitempty"` +} + +// HasCustomHeadersDefined checks to see if any of the custom header elements have been set +func (h *Headers) HasCustomHeadersDefined() bool { + return h != nil && (len(h.CustomResponseHeaders) != 0 || + len(h.CustomRequestHeaders) != 0) +} + +// HasSecureHeadersDefined checks to see if any of the secure header elements have been set +func (h *Headers) HasSecureHeadersDefined() bool { + return h != nil && (len(h.AllowedHosts) != 0 || + len(h.HostsProxyHeaders) != 0 || + h.SSLRedirect || + h.SSLTemporaryRedirect || + h.SSLForceHost || + h.SSLHost != "" || + len(h.SSLProxyHeaders) != 0 || + h.STSSeconds != 0 || + h.STSIncludeSubdomains || + h.STSPreload || + h.ForceSTSHeader || + h.FrameDeny || + h.CustomFrameOptionsValue != "" || + h.ContentTypeNosniff || + h.BrowserXSSFilter || + h.CustomBrowserXSSValue != "" || + h.ContentSecurityPolicy != "" || + h.PublicKey != "" || + h.ReferrerPolicy != "" || + h.IsDevelopment) +} + +// IPStrategy holds the ip strategy configuration. +type IPStrategy struct { + Depth int `json:"depth,omitempty" export:"true"` + ExcludedIPs []string `json:"excludedIPs,omitempty"` +} + +// Get an IP selection strategy +// if nil return the RemoteAddr strategy +// else return a strategy base on the configuration using the X-Forwarded-For Header. 
+// Depth override the ExcludedIPs +func (s *IPStrategy) Get() (ip.Strategy, error) { + if s == nil { + return &ip.RemoteAddrStrategy{}, nil + } + + if s.Depth > 0 { + return &ip.DepthStrategy{ + Depth: s.Depth, + }, nil + } + + if len(s.ExcludedIPs) > 0 { + checker, err := ip.NewChecker(s.ExcludedIPs) + if err != nil { + return nil, err + } + return &ip.CheckerStrategy{ + Checker: checker, + }, nil + } + + return &ip.RemoteAddrStrategy{}, nil +} + +// IPWhiteList holds the ip white list configuration. +type IPWhiteList struct { + SourceRange []string `json:"sourceRange,omitempty"` + IPStrategy *IPStrategy `json:"ipStrategy,omitempty"` +} + +// MaxConn holds maximum connection configuration. +type MaxConn struct { + Amount int64 `json:"amount,omitempty"` + ExtractorFunc string `json:"extractorFunc,omitempty"` +} + +// PassTLSClientCert holds the TLS client cert headers configuration. +type PassTLSClientCert struct { + PEM bool `description:"Enable header with escaped client pem" json:"pem"` + Infos *TLSClientCertificateInfos `description:"Enable header with configured client cert infos" json:"infos,omitempty"` +} + +// Rate holds the rate limiting configuration for a specific time period. +type Rate struct { + Period parse.Duration `json:"period,omitempty"` + Average int64 `json:"average,omitempty"` + Burst int64 `json:"burst,omitempty"` +} + +// RateLimit holds the rate limiting configuration for a given frontend. +type RateLimit struct { + RateSet map[string]*Rate `json:"rateset,omitempty"` + // FIXME replace by ipStrategy see oxy and replace + ExtractorFunc string `json:"extractorFunc,omitempty"` +} + +// Redirect holds the redirection configuration of an entry point to another, or to an URL. +type Redirect struct { + Regex string `json:"regex,omitempty"` + Replacement string `json:"replacement,omitempty"` + Permanent bool `json:"permanent,omitempty"` +} + +// ReplacePath holds the ReplacePath configuration. +type ReplacePath struct { + Path string `json:"path,omitempty"` +} + +// ReplacePathRegex holds the ReplacePathRegex configuration. +type ReplacePathRegex struct { + Regex string `json:"regex,omitempty"` + Replacement string `json:"replacement,omitempty"` +} + +// Retry contains request retry config +type Retry struct { + Attempts int `description:"Number of attempts" export:"true"` +} + +// StripPrefix holds the StripPrefix configuration. +type StripPrefix struct { + Prefixes []string `json:"prefixes,omitempty"` +} + +// StripPrefixRegex holds the StripPrefixRegex configuration. +type StripPrefixRegex struct { + Regex []string `json:"regex,omitempty"` +} + +// TLSClientCertificateInfos holds the client TLS certificate infos configuration. +type TLSClientCertificateInfos struct { + NotAfter bool `description:"Add NotAfter info in header" json:"notAfter"` + NotBefore bool `description:"Add NotBefore info in header" json:"notBefore"` + Subject *TLSCLientCertificateSubjectInfos `description:"Add Subject info in header" json:"subject,omitempty"` + Sans bool `description:"Add Sans info in header" json:"sans"` +} + +// TLSCLientCertificateSubjectInfos holds the client TLS certificate subject infos configuration. 
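As the Get method above shows, Depth takes precedence over ExcludedIPs, and a nil receiver falls back to the remote address. A minimal sketch (illustrative only):

	var s *IPStrategy
	strategy, _ := s.Get() // nil receiver: &ip.RemoteAddrStrategy{}

	s = &IPStrategy{Depth: 2, ExcludedIPs: []string{"10.0.0.0/8"}}
	strategy, _ = s.Get() // Depth wins: &ip.DepthStrategy{Depth: 2}; ExcludedIPs is ignored
	_ = strategy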
+type TLSCLientCertificateSubjectInfos struct { + Country bool `description:"Add Country info in header" json:"country"` + Province bool `description:"Add Province info in header" json:"province"` + Locality bool `description:"Add Locality info in header" json:"locality"` + Organization bool `description:"Add Organization info in header" json:"organization"` + CommonName bool `description:"Add CommonName info in header" json:"commonName"` + SerialNumber bool `description:"Add SerialNumber info in header" json:"serialNumber"` +} + +// Users holds a list of users +type Users []string diff --git a/config/static/convert.go b/config/static/convert.go new file mode 100644 index 000000000..ac7336198 --- /dev/null +++ b/config/static/convert.go @@ -0,0 +1,246 @@ +package static + +import ( + oldapi "github.com/containous/traefik/old/api" + "github.com/containous/traefik/old/configuration" + oldtracing "github.com/containous/traefik/old/middlewares/tracing" + oldfile "github.com/containous/traefik/old/provider/file" + oldtypes "github.com/containous/traefik/old/types" + "github.com/containous/traefik/ping" + "github.com/containous/traefik/provider" + "github.com/containous/traefik/provider/file" + "github.com/containous/traefik/tracing/datadog" + "github.com/containous/traefik/tracing/jaeger" + "github.com/containous/traefik/tracing/zipkin" + "github.com/containous/traefik/types" +) + +// ConvertStaticConf FIXME sugar +// Deprecated +func ConvertStaticConf(globalConfiguration configuration.GlobalConfiguration) Configuration { + staticConfiguration := Configuration{} + + staticConfiguration.EntryPoints = &EntryPoints{ + EntryPointList: make(EntryPointList), + Defaults: globalConfiguration.DefaultEntryPoints, + } + + if globalConfiguration.EntryPoints != nil { + for name, ep := range globalConfiguration.EntryPoints { + staticConfiguration.EntryPoints.EntryPointList[name] = EntryPoint{ + Address: ep.Address, + } + } + } + + if globalConfiguration.Ping != nil { + old := globalConfiguration.Ping + staticConfiguration.Ping = &ping.Handler{ + EntryPoint: old.EntryPoint, + } + } + + staticConfiguration.API = convertAPI(globalConfiguration.API) + staticConfiguration.Constraints = convertConstraints(globalConfiguration.Constraints) + staticConfiguration.File = convertFile(globalConfiguration.File) + staticConfiguration.Metrics = ConvertMetrics(globalConfiguration.Metrics) + staticConfiguration.AccessLog = ConvertAccessLog(globalConfiguration.AccessLog) + staticConfiguration.Tracing = ConvertTracing(globalConfiguration.Tracing) + staticConfiguration.HostResolver = ConvertHostResolverConfig(globalConfiguration.HostResolver) + + return staticConfiguration +} + +// ConvertAccessLog FIXME sugar +// Deprecated +func ConvertAccessLog(old *oldtypes.AccessLog) *types.AccessLog { + if old == nil { + return nil + } + + accessLog := &types.AccessLog{ + FilePath: old.FilePath, + Format: old.Format, + BufferingSize: old.BufferingSize, + } + + if old.Filters != nil { + accessLog.Filters = &types.AccessLogFilters{ + StatusCodes: types.StatusCodes(old.Filters.StatusCodes), + RetryAttempts: old.Filters.RetryAttempts, + MinDuration: old.Filters.MinDuration, + } + } + + if old.Fields != nil { + accessLog.Fields = &types.AccessLogFields{ + DefaultMode: old.Fields.DefaultMode, + Names: types.FieldNames(old.Fields.Names), + } + + if old.Fields.Headers != nil { + accessLog.Fields.Headers = &types.FieldHeaders{ + DefaultMode: old.Fields.Headers.DefaultMode, + Names: types.FieldHeaderNames(old.Fields.Headers.Names), + } + } + } + + 
return accessLog +} + +// ConvertMetrics FIXME sugar +// Deprecated +func ConvertMetrics(old *oldtypes.Metrics) *types.Metrics { + if old == nil { + return nil + } + + metrics := &types.Metrics{} + + if old.Prometheus != nil { + metrics.Prometheus = &types.Prometheus{ + EntryPoint: old.Prometheus.EntryPoint, + Buckets: types.Buckets(old.Prometheus.Buckets), + } + } + + if old.Datadog != nil { + metrics.Datadog = &types.Datadog{ + Address: old.Datadog.Address, + PushInterval: old.Datadog.PushInterval, + } + } + + if old.StatsD != nil { + metrics.StatsD = &types.Statsd{ + Address: old.StatsD.Address, + PushInterval: old.StatsD.PushInterval, + } + } + if old.InfluxDB != nil { + metrics.InfluxDB = &types.InfluxDB{ + Address: old.InfluxDB.Address, + Protocol: old.InfluxDB.Protocol, + PushInterval: old.InfluxDB.PushInterval, + Database: old.InfluxDB.Database, + RetentionPolicy: old.InfluxDB.RetentionPolicy, + Username: old.InfluxDB.Username, + Password: old.InfluxDB.Password, + } + } + + return metrics +} + +// ConvertTracing FIXME sugar +// Deprecated +func ConvertTracing(old *oldtracing.Tracing) *Tracing { + if old == nil { + return nil + } + + tra := &Tracing{ + Backend: old.Backend, + ServiceName: old.ServiceName, + SpanNameLimit: old.SpanNameLimit, + } + + if old.Jaeger != nil { + tra.Jaeger = &jaeger.Config{ + SamplingServerURL: old.Jaeger.SamplingServerURL, + SamplingType: old.Jaeger.SamplingType, + SamplingParam: old.Jaeger.SamplingParam, + LocalAgentHostPort: old.Jaeger.LocalAgentHostPort, + Gen128Bit: old.Jaeger.Gen128Bit, + Propagation: old.Jaeger.Propagation, + } + } + + if old.Zipkin != nil { + tra.Zipkin = &zipkin.Config{ + HTTPEndpoint: old.Zipkin.HTTPEndpoint, + SameSpan: old.Zipkin.SameSpan, + ID128Bit: old.Zipkin.ID128Bit, + Debug: old.Zipkin.Debug, + } + } + + if old.DataDog != nil { + tra.DataDog = &datadog.Config{ + LocalAgentHostPort: old.DataDog.LocalAgentHostPort, + GlobalTag: old.DataDog.GlobalTag, + Debug: old.DataDog.Debug, + } + } + + return tra +} + +func convertAPI(old *oldapi.Handler) *API { + if old == nil { + return nil + } + + api := &API{ + EntryPoint: old.EntryPoint, + Dashboard: old.Dashboard, + DashboardAssets: old.DashboardAssets, + } + + if old.Statistics != nil { + api.Statistics = &types.Statistics{ + RecentErrors: old.Statistics.RecentErrors, + } + } + + return api +} + +func convertConstraints(oldConstraints oldtypes.Constraints) types.Constraints { + constraints := types.Constraints{} + for _, value := range oldConstraints { + constraint := &types.Constraint{ + Key: value.Key, + MustMatch: value.MustMatch, + Regex: value.Regex, + } + + constraints = append(constraints, constraint) + } + return constraints +} + +func convertFile(old *oldfile.Provider) *file.Provider { + if old == nil { + return nil + } + + f := &file.Provider{ + BaseProvider: provider.BaseProvider{ + Watch: old.Watch, + Filename: old.Filename, + Trace: old.Trace, + }, + Directory: old.Directory, + TraefikFile: old.TraefikFile, + } + f.DebugLogGeneratedTemplate = old.DebugLogGeneratedTemplate + f.Constraints = convertConstraints(old.Constraints) + + return f +} + +// ConvertHostResolverConfig FIXME +// Deprecated +func ConvertHostResolverConfig(oldconfig *configuration.HostResolverConfig) *HostResolverConfig { + if oldconfig == nil { + return nil + } + + return &HostResolverConfig{ + CnameFlattening: oldconfig.CnameFlattening, + ResolvConfig: oldconfig.ResolvConfig, + ResolvDepth: oldconfig.ResolvDepth, + } +} diff --git a/config/static/static_config.go b/config/static/static_config.go 
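The converters above are deliberately mechanical: each one nil-checks the legacy value and copies fields one-to-one. A minimal sketch (illustrative only, using the package names imported in convert.go above) of bridging the old global configuration:

	old := configuration.GlobalConfiguration{}
	old.DefaultEntryPoints = append(old.DefaultEntryPoints, "http")

	staticConf := ConvertStaticConf(old)
	// staticConf.EntryPoints.Defaults now mirrors the legacy defaultEntryPoints,
	// while sub-sections that were nil (API, Tracing, ...) stay nil.
	_ = staticConf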
new file mode 100644 index 000000000..61ceb366b --- /dev/null +++ b/config/static/static_config.go @@ -0,0 +1,113 @@ +package static + +import ( + "github.com/containous/flaeg/parse" + "github.com/containous/traefik/ping" + "github.com/containous/traefik/provider/file" + "github.com/containous/traefik/tls" + "github.com/containous/traefik/tracing/datadog" + "github.com/containous/traefik/tracing/jaeger" + "github.com/containous/traefik/tracing/zipkin" + "github.com/containous/traefik/types" + "github.com/elazarl/go-bindata-assetfs" +) + +// Configuration FIXME temp static configuration +type Configuration struct { + Global *Global + EntryPoints *EntryPoints + + API *API `description:"Enable api/dashboard" export:"true"` + Metrics *types.Metrics `description:"Enable a metrics exporter" export:"true"` + Ping *ping.Handler `description:"Enable ping" export:"true"` + // Rest *rest.Provider `description:"Enable Rest backend with default settings" export:"true"` + + Log *types.TraefikLog + AccessLog *types.AccessLog `description:"Access log settings" export:"true"` + Tracing *Tracing `description:"OpenTracing configuration" export:"true"` + + File *file.Provider `description:"Enable File backend with default settings" export:"true"` + Constraints types.Constraints `description:"Filter services by constraint, matching with service tags" export:"true"` + + HostResolver *HostResolverConfig `description:"Enable CNAME Flattening" export:"true"` + + // TODO + // ACME *acme.ACME `description:"Enable ACME (Let's Encrypt): automatic SSL" export:"true"` + // Retry *Retry `description:"Enable retry sending request if network error" export:"true"` + // HealthCheck *HealthCheckConfig `description:"Health check parameters" export:"true"` + // + +} + +// Global holds the global configuration. +type Global struct { + Debug bool `short:"d" description:"Enable debug mode" export:"true"` + CheckNewVersion bool `description:"Periodically check if a new version has been released" export:"true"` + SendAnonymousUsage bool `description:"send periodically anonymous usage statistics" export:"true"` + InsecureSkipVerify bool `description:"Disable SSL certificate verification" export:"true"` + RootCAs tls.FilesOrContents `description:"Add cert file for self-signed certificate"` + ProvidersThrottleDuration parse.Duration `description:"Backends throttle duration: minimum duration between 2 events from providers before applying a new configuration. It avoids unnecessary reloads if multiples events are sent in a short amount of time." export:"true"` + LifeCycle *LifeCycle `description:"Timeouts influencing the server life cycle" export:"true"` + RespondingTimeouts *RespondingTimeouts `description:"Timeouts for incoming requests to the Traefik instance" export:"true"` + ForwardingTimeouts *ForwardingTimeouts `description:"Timeouts for requests forwarded to the backend servers" export:"true"` + MaxIdleConnsPerHost int `description:"If non-zero, controls the maximum idle (keep-alive) to keep per-host. 
If zero, DefaultMaxIdleConnsPerHost is used" export:"true"`
+}
+
+// API holds the API configuration.
+type API struct {
+	EntryPoint      string            `description:"EntryPoint" export:"true"`
+	Dashboard       bool              `description:"Activate dashboard" export:"true"`
+	Statistics      *types.Statistics `description:"Enable more detailed statistics" export:"true"`
+	Middlewares     []string          `description:"Middleware list" export:"true"`
+	DashboardAssets *assetfs.AssetFS  `json:"-"`
+}
+
+// RespondingTimeouts contains timeout configurations for incoming requests to the Traefik instance.
+type RespondingTimeouts struct {
+	ReadTimeout  parse.Duration `description:"ReadTimeout is the maximum duration for reading the entire request, including the body. If zero, no timeout is set" export:"true"`
+	WriteTimeout parse.Duration `description:"WriteTimeout is the maximum duration before timing out writes of the response. If zero, no timeout is set" export:"true"`
+	IdleTimeout  parse.Duration `description:"IdleTimeout is the maximum duration an idle (keep-alive) connection will remain idle before closing itself. Defaults to 180 seconds. If zero, no timeout is set" export:"true"`
+}
+
+// ForwardingTimeouts contains timeout configurations for forwarding requests to the backend servers.
+type ForwardingTimeouts struct {
+	DialTimeout           parse.Duration `description:"The amount of time to wait until a connection to a backend server can be established. Defaults to 30 seconds. If zero, no timeout exists" export:"true"`
+	ResponseHeaderTimeout parse.Duration `description:"The amount of time to wait for a server's response headers after fully writing the request (including its body, if any). If zero, no timeout exists" export:"true"`
+}
+
+// LifeCycle contains configurations relevant to the lifecycle (such as the shutdown phase) of Traefik.
+type LifeCycle struct {
+	RequestAcceptGraceTimeout parse.Duration `description:"Duration to keep accepting requests before Traefik initiates the graceful shutdown procedure"`
+	GraceTimeOut              parse.Duration `description:"Duration to give active requests a chance to finish before Traefik stops"`
+}
+
+// EntryPoint holds the entry point configuration.
+type EntryPoint struct {
+	Address string
+}
+
+// EntryPointList holds the HTTP entry point list type.
+type EntryPointList map[string]EntryPoint
+
+// EntryPoints holds the entry points configuration.
+type EntryPoints struct {
+	EntryPointList
+	Defaults []string
+}
+
+// Tracing holds the tracing configuration.
+type Tracing struct {
+	Backend       string          `description:"Selects the tracing backend ('jaeger','zipkin', 'datadog')." export:"true"`
+	ServiceName   string          `description:"Set the name for this service" export:"true"`
+	SpanNameLimit int             `description:"Set the maximum character limit for Span names (default 0 = no limit)" export:"true"`
+	Jaeger        *jaeger.Config  `description:"Settings for jaeger"`
+	Zipkin        *zipkin.Config  `description:"Settings for zipkin"`
+	DataDog       *datadog.Config `description:"Settings for DataDog"`
+}
+
+// HostResolverConfig contains the configuration for CNAME Flattening.
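A minimal sketch (illustrative only, values arbitrary) of assembling the new static configuration from the types above:

	cfg := Configuration{
		Global: &Global{CheckNewVersion: true},
		EntryPoints: &EntryPoints{
			Defaults:       []string{"http"},
			EntryPointList: EntryPointList{"http": {Address: ":80"}},
		},
		API: &API{EntryPoint: "traefik", Dashboard: true},
	}
	_ = cfg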
+type HostResolverConfig struct { + CnameFlattening bool `description:"A flag to enable/disable CNAME flattening" export:"true"` + ResolvConfig string `description:"resolv.conf used for DNS resolving" export:"true"` + ResolvDepth int `description:"The maximal depth of DNS recursive resolving" export:"true"` +} diff --git a/docs/configuration/commons.md b/docs/configuration/commons.md index 7eb241124..6460bbc61 100644 --- a/docs/configuration/commons.md +++ b/docs/configuration/commons.md @@ -83,7 +83,7 @@ If you encounter 'too many open files' errors, you can either increase this valu - `defaultEntryPoints`: Entrypoints to be used by frontends that do not specify any entrypoint. Each frontend can specify its own entrypoints. -- `keepTrailingSlash`: Tells Træfik whether it should keep the trailing slashes that might be present in the paths of incoming requests (true), or if it should redirect to the slashless version of the URL (default behavior: false) +- `keepTrailingSlash`: Tells Traefik whether it should keep the trailing slashes that might be present in the paths of incoming requests (true), or if it should redirect to the slashless version of the URL (default behavior: false) !!! note Beware that the value of `keepTrailingSlash` can have a significant impact on the way your frontend rules are interpreted. diff --git a/docs/configuration/entrypoints.md b/docs/configuration/entrypoints.md index 6e0c9fbc5..94ce8f75f 100644 --- a/docs/configuration/entrypoints.md +++ b/docs/configuration/entrypoints.md @@ -486,7 +486,7 @@ Responses are compressed when: ## White Listing -Træfik supports whitelisting to accept or refuse requests based on the client IP. +Traefik supports whitelisting to accept or refuse requests based on the client IP. The following example enables IP white listing and accepts requests from client IPs defined in `sourceRange`. @@ -501,7 +501,7 @@ The following example enables IP white listing and accepts requests from client # Override the clientIPStrategy ``` -By default, Træfik uses the client IP (see [ClientIPStrategy](/configuration/entrypoints/#clientipstrategy)) for the whitelisting. +By default, Traefik uses the client IP (see [ClientIPStrategy](/configuration/entrypoints/#clientipstrategy)) for the whitelisting. If you want to use another IP than the one determined by `ClientIPStrategy` for the whitelisting, you can define the `IPStrategy` option: @@ -522,7 +522,7 @@ In the above example, if the value of the `X-Forwarded-For` header was `"10.0.0. ## ClientIPStrategy -The `clientIPStrategy` defines how you want Træfik to determine the client IP (used for whitelisting for example). +The `clientIPStrategy` defines how you want Traefik to determine the client IP (used for whitelisting for example). There are several option available: @@ -560,7 +560,7 @@ Examples: ### Excluded IPs -Træfik will scan the `X-Forwarded-For` header (from the right) and pick the first IP not in the `excludedIPs` list. +Traefik will scan the `X-Forwarded-For` header (from the right) and pick the first IP not in the `excludedIPs` list. ```toml [entryPoints] @@ -586,7 +586,7 @@ Examples: ### Default -If there are no `depth` or `excludedIPs`, then the client IP is the IP of the computer that initiated the connection with the Træfik server (the remote address). +If there are no `depth` or `excludedIPs`, then the client IP is the IP of the computer that initiated the connection with the Traefik server (the remote address). 
## ProxyProtocol diff --git a/h2c/h2c.go b/h2c/h2c.go index c5961620a..e795955fc 100644 --- a/h2c/h2c.go +++ b/h2c/h2c.go @@ -114,7 +114,7 @@ func initH2CWithPriorKnowledge(w http.ResponseWriter) (net.Conn, error) { conn.Close() if http2VerboseLogs { - log.Printf( + log.Infof( "Missing the request body portion of the client preface. Wanted: %v Got: %v", []byte(expectedBody), buf[0:n], diff --git a/healthcheck/healthcheck.go b/healthcheck/healthcheck.go index 41fde8ab8..05574fa24 100644 --- a/healthcheck/healthcheck.go +++ b/healthcheck/healthcheck.go @@ -131,50 +131,58 @@ func (hc *HealthCheck) execute(ctx context.Context, backend *BackendConfig) { func (hc *HealthCheck) checkBackend(backend *BackendConfig) { enabledURLs := backend.LB.Servers() var newDisabledURLs []*url.URL + // FIXME re enable metrics for _, disableURL := range backend.disabledURLs { - serverUpMetricValue := float64(0) + //serverUpMetricValue := float64(0) if err := checkHealth(disableURL, backend); err == nil { log.Warnf("Health check up: Returning to server list. Backend: %q URL: %q", backend.name, disableURL.String()) if err := backend.LB.UpsertServer(disableURL, roundrobin.Weight(1)); err != nil { log.Error(err) } - serverUpMetricValue = 1 + //serverUpMetricValue = 1 } else { log.Warnf("Health check still failing. Backend: %q URL: %q Reason: %s", backend.name, disableURL.String(), err) newDisabledURLs = append(newDisabledURLs, disableURL) } - labelValues := []string{"backend", backend.name, "url", disableURL.String()} - hc.metrics.BackendServerUpGauge().With(labelValues...).Set(serverUpMetricValue) + //labelValues := []string{"backend", backend.name, "url", disableURL.String()} + //hc.metrics.BackendServerUpGauge().With(labelValues...).Set(serverUpMetricValue) } backend.disabledURLs = newDisabledURLs + // FIXME re enable metrics for _, enableURL := range enabledURLs { - serverUpMetricValue := float64(1) + //serverUpMetricValue := float64(1) if err := checkHealth(enableURL, backend); err != nil { log.Warnf("Health check failed: Remove from server list. Backend: %q URL: %q Reason: %s", backend.name, enableURL.String(), err) if err := backend.LB.RemoveServer(enableURL); err != nil { log.Error(err) } backend.disabledURLs = append(backend.disabledURLs, enableURL) - serverUpMetricValue = 0 + //serverUpMetricValue = 0 } - labelValues := []string{"backend", backend.name, "url", enableURL.String()} - hc.metrics.BackendServerUpGauge().With(labelValues...).Set(serverUpMetricValue) + //labelValues := []string{"backend", backend.name, "url", enableURL.String()} + //hc.metrics.BackendServerUpGauge().With(labelValues...).Set(serverUpMetricValue) } } +// FIXME re add metrics +//func GetHealthCheck(metrics metricsRegistry) *HealthCheck { + // GetHealthCheck returns the health check which is guaranteed to be a singleton. 
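While the metrics wiring is commented out (see the FIXMEs above), the health-check singleton is obtained without a metrics registry, as the signature change below shows. A minimal sketch (illustrative only):

	hc := GetHealthCheck() // no metricsRegistry argument while metrics are disabled
	_ = hc                 // hc.Backends starts empty and is filled by the server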
-func GetHealthCheck(metrics metricsRegistry) *HealthCheck { +func GetHealthCheck() *HealthCheck { once.Do(func() { - singleton = newHealthCheck(metrics) + singleton = newHealthCheck() + //singleton = newHealthCheck(metrics) }) return singleton } -func newHealthCheck(metrics metricsRegistry) *HealthCheck { +// FIXME re add metrics +//func newHealthCheck(metrics metricsRegistry) *HealthCheck { +func newHealthCheck() *HealthCheck { return &HealthCheck{ Backends: make(map[string]*BackendConfig), - metrics: metrics, + //metrics: metrics, } } diff --git a/healthcheck/healthcheck_test.go b/healthcheck/healthcheck_test.go index 756d234c3..c354a4238 100644 --- a/healthcheck/healthcheck_test.go +++ b/healthcheck/healthcheck_test.go @@ -146,7 +146,8 @@ func TestSetBackendsConfiguration(t *testing.T) { assert.Equal(t, test.expectedNumRemovedServers, lb.numRemovedServers, "removed servers") assert.Equal(t, test.expectedNumUpsertedServers, lb.numUpsertedServers, "upserted servers") - assert.Equal(t, test.expectedGaugeValue, collectingMetrics.Gauge.GaugeValue, "ServerUp Gauge") + // FIXME re add metrics + //assert.Equal(t, test.expectedGaugeValue, collectingMetrics.Gauge.GaugeValue, "ServerUp Gauge") }) } } diff --git a/integration/access_log_test.go b/integration/access_log_test.go index 8fcf35bf4..1ab33ad5b 100644 --- a/integration/access_log_test.go +++ b/integration/access_log_test.go @@ -13,7 +13,7 @@ import ( "github.com/containous/traefik/integration/try" "github.com/containous/traefik/log" - "github.com/containous/traefik/middlewares/accesslog" + "github.com/containous/traefik/old/middlewares/accesslog" "github.com/go-check/check" checker "github.com/vdemeester/shakers" ) diff --git a/integration/acme_test.go b/integration/acme_test.go index ebb5342c6..94af17641 100644 --- a/integration/acme_test.go +++ b/integration/acme_test.go @@ -11,9 +11,9 @@ import ( "time" "github.com/containous/traefik/integration/try" - "github.com/containous/traefik/provider/acme" + "github.com/containous/traefik/old/provider/acme" + "github.com/containous/traefik/old/types" "github.com/containous/traefik/testhelpers" - "github.com/containous/traefik/types" "github.com/go-check/check" "github.com/miekg/dns" checker "github.com/vdemeester/shakers" @@ -256,6 +256,8 @@ func (s *AcmeSuite) TestHTTP01OnHostRuleDynamicCertificatesWithWildcard(c *check } func (s *AcmeSuite) TestHTTP01OnDemand(c *check.C) { + c.Skip("on demand") + testCase := acmeTestCase{ traefikConfFilePath: "fixtures/acme/acme_base.toml", template: templateModel{ @@ -272,6 +274,8 @@ func (s *AcmeSuite) TestHTTP01OnDemand(c *check.C) { } func (s *AcmeSuite) TestHTTP01OnDemandStaticCertificatesWithWildcard(c *check.C) { + c.Skip("on demand") + testCase := acmeTestCase{ traefikConfFilePath: "fixtures/acme/acme_tls.toml", template: templateModel{ @@ -288,6 +292,8 @@ func (s *AcmeSuite) TestHTTP01OnDemandStaticCertificatesWithWildcard(c *check.C) } func (s *AcmeSuite) TestHTTP01OnDemandStaticCertificatesWithWildcardMultipleEntrypoints(c *check.C) { + c.Skip("on demand") + testCase := acmeTestCase{ traefikConfFilePath: "fixtures/acme/acme_tls_multiple_entrypoints.toml", template: templateModel{ @@ -304,6 +310,8 @@ func (s *AcmeSuite) TestHTTP01OnDemandStaticCertificatesWithWildcardMultipleEntr } func (s *AcmeSuite) TestHTTP01OnDemandDynamicCertificatesWithWildcard(c *check.C) { + c.Skip("on demand") + testCase := acmeTestCase{ traefikConfFilePath: "fixtures/acme/acme_tls_dynamic.toml", template: templateModel{ diff --git a/integration/consul_catalog_test.go 
b/integration/consul_catalog_test.go index 7bda5adb6..f663a91fe 100644 --- a/integration/consul_catalog_test.go +++ b/integration/consul_catalog_test.go @@ -7,7 +7,7 @@ import ( "time" "github.com/containous/traefik/integration/try" - "github.com/containous/traefik/provider/label" + "github.com/containous/traefik/old/provider/label" "github.com/go-check/check" "github.com/hashicorp/consul/api" checker "github.com/vdemeester/shakers" diff --git a/integration/consul_test.go b/integration/consul_test.go index ef1f274d2..70c94e170 100644 --- a/integration/consul_test.go +++ b/integration/consul_test.go @@ -16,7 +16,7 @@ import ( "github.com/containous/staert" "github.com/containous/traefik/cluster" "github.com/containous/traefik/integration/try" - "github.com/containous/traefik/types" + "github.com/containous/traefik/old/types" "github.com/go-check/check" checker "github.com/vdemeester/shakers" ) diff --git a/integration/docker_compose_test.go b/integration/docker_compose_test.go index 2ca65eaac..245ccf249 100644 --- a/integration/docker_compose_test.go +++ b/integration/docker_compose_test.go @@ -8,8 +8,8 @@ import ( "time" "github.com/containous/traefik/integration/try" + "github.com/containous/traefik/old/types" "github.com/containous/traefik/testhelpers" - "github.com/containous/traefik/types" "github.com/go-check/check" checker "github.com/vdemeester/shakers" ) diff --git a/integration/docker_test.go b/integration/docker_test.go index 9e22f7b92..79cd55110 100644 --- a/integration/docker_test.go +++ b/integration/docker_test.go @@ -10,7 +10,7 @@ import ( "time" "github.com/containous/traefik/integration/try" - "github.com/containous/traefik/provider/label" + "github.com/containous/traefik/old/provider/label" "github.com/docker/docker/pkg/namesgenerator" "github.com/go-check/check" d "github.com/libkermit/docker" diff --git a/integration/dynamodb_test.go b/integration/dynamodb_test.go index b3858b3dd..fc376e285 100644 --- a/integration/dynamodb_test.go +++ b/integration/dynamodb_test.go @@ -11,7 +11,7 @@ import ( "github.com/aws/aws-sdk-go/service/dynamodb" "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute" "github.com/containous/traefik/integration/try" - "github.com/containous/traefik/types" + "github.com/containous/traefik/old/types" "github.com/go-check/check" checker "github.com/vdemeester/shakers" ) diff --git a/integration/fake_dns_server.go b/integration/fake_dns_server.go index fa7cba2a4..1d8ff760b 100644 --- a/integration/fake_dns_server.go +++ b/integration/fake_dns_server.go @@ -24,7 +24,7 @@ func (s *handler) ServeDNS(w dns.ResponseWriter, r *dns.Msg) { fakeDNS = "127.0.0.1" } for _, q := range r.Question { - log.Printf("Query -- [%s] %s", q.Name, dns.TypeToString[q.Qtype]) + log.Infof("Query -- [%s] %s", q.Name, dns.TypeToString[q.Qtype]) switch q.Qtype { case dns.TypeA: diff --git a/integration/fixtures/access_log_config.toml b/integration/fixtures/access_log_config.toml index d9aebb735..4be76e703 100644 --- a/integration/fixtures/access_log_config.toml +++ b/integration/fixtures/access_log_config.toml @@ -39,4 +39,4 @@ checkNewVersion = false [docker] exposedByDefault = false domain = "docker.local" - watch = true \ No newline at end of file + watch = true diff --git a/integration/fixtures/acme/acme_base.toml b/integration/fixtures/acme/acme_base.toml index ec07ddf91..1a6659ea8 100644 --- a/integration/fixtures/acme/acme_base.toml +++ b/integration/fixtures/acme/acme_base.toml @@ -40,14 +40,14 @@ defaultEntryPoints = ["http", "https"] [file] -[backends] - 
[backends.backend] - [backends.backend.servers.server1] +[services] + [services.test.loadbalancer] + [[services.test.loadbalancer.servers]] url = "http://127.0.0.1:9010" weight = 1 -[frontends] - [frontends.frontend] - backend = "backend" - [frontends.frontend.routes.test] - rule = "Host:traefik.acme.wtf" +[routers] + [routers.test] + service = "test" + rule = "Host:traefik.acme.wtf" + entryPoints = ["https"] diff --git a/integration/fixtures/acme/acme_http01_web_path.toml b/integration/fixtures/acme/acme_http01_web_path.toml index 4992ae7fc..5d80d3293 100644 --- a/integration/fixtures/acme/acme_http01_web_path.toml +++ b/integration/fixtures/acme/acme_http01_web_path.toml @@ -37,14 +37,14 @@ path="/traefik" [file] -[backends] - [backends.backend] - [backends.backend.servers.server1] +[services] + [services.test.loadbalancer] + [[services.test.loadbalancer.servers]] url = "http://127.0.0.1:9010" weight = 1 -[frontends] - [frontends.frontend] - backend = "backend" - [frontends.frontend.routes.test] - rule = "Host:traefik.acme.wtf" +[routers] + [routers.test] + service = "test" + rule = "Host:traefik.acme.wtf" + entryPoints = ["https"] diff --git a/integration/fixtures/acme/acme_tls.toml b/integration/fixtures/acme/acme_tls.toml index 061554ff5..85386a3cf 100644 --- a/integration/fixtures/acme/acme_tls.toml +++ b/integration/fixtures/acme/acme_tls.toml @@ -43,14 +43,14 @@ defaultEntryPoints = ["http", "https"] [file] -[backends] - [backends.backend] - [backends.backend.servers.server1] +[services] + [services.test.loadbalancer] + [[services.test.loadbalancer.servers]] url = "http://127.0.0.1:9010" weight = 1 -[frontends] - [frontends.frontend] - backend = "backend" - [frontends.frontend.routes.test] - rule = "Host:traefik.acme.wtf" +[routers] + [routers.test] + service = "test" + rule = "Host:traefik.acme.wtf" + entryPoints = ["https"] diff --git a/integration/fixtures/acme/acme_tls_multiple_entrypoints.toml b/integration/fixtures/acme/acme_tls_multiple_entrypoints.toml index 4288b0225..588343dcb 100644 --- a/integration/fixtures/acme/acme_tls_multiple_entrypoints.toml +++ b/integration/fixtures/acme/acme_tls_multiple_entrypoints.toml @@ -43,17 +43,3 @@ defaultEntryPoints = ["http", "https"] {{end}} [api] - -[file] - -[backends] - [backends.backend] - [backends.backend.servers.server1] - url = "http://127.0.0.1:9010" - weight = 1 - -[frontends] - [frontends.frontend] - backend = "backend" - [frontends.frontend.routes.test] - rule = "Host:traefik.acme.wtf" diff --git a/integration/fixtures/acme/certificates.toml b/integration/fixtures/acme/certificates.toml index 0708d0d80..090409fc2 100644 --- a/integration/fixtures/acme/certificates.toml +++ b/integration/fixtures/acme/certificates.toml @@ -1,17 +1,18 @@ -[backends] - [backends.backend] - [backends.backend.servers.server1] +[services] + [services.test.loadbalancer] + [[services.test.loadbalancer.servers]] url = "http://127.0.0.1:9010" weight = 1 -[frontends] - [frontends.frontend] - backend = "backend" - [frontends.frontend.routes.test] - rule = "Host:traefik.acme.wtf" +[routers] + [routers.test] + service = "test" + rule = "Host:traefik.acme.wtf" + entryPoints = ["https"] + [[tls]] entryPoints = ["https"] [tls.certificate] certFile = "fixtures/acme/ssl/wildcard.crt" - keyFile = "fixtures/acme/ssl/wildcard.key" \ No newline at end of file + keyFile = "fixtures/acme/ssl/wildcard.key" diff --git a/integration/fixtures/error_pages/error.toml b/integration/fixtures/error_pages/error.toml index 7dd432da6..1bc39829e 100644 --- 
a/integration/fixtures/error_pages/error.toml +++ b/integration/fixtures/error_pages/error.toml @@ -7,24 +7,29 @@ logLevel = "DEBUG" address = ":8080" [file] -[backends] - [backends.backend1] - [backends.backend1.servers.server1] - url = "http://{{.Server1}}:8989474" - weight = 1 - [backends.error] - [backends.error.servers.error] - url = "http://{{.Server2}}:80" - weight = 1 -[frontends] - [frontends.frontend1] - passHostHeader = true - backend = "backend1" - [frontends.frontend1.routes.test_1] - rule = "Host:test.local" - [frontends.frontend1.errors] - [frontends.frontend1.errors.networks] - status = ["500-502", "503-599"] - backend = "error" - query = "/50x.html" +[routers] + [routers.router1] + middlewares = ["error"] + service = "service1" + + [routers.router1.routes.test_1] + rule = "Host:test.local" + +[middlewares] + [middlewares.error.errors] + status = ["500-502", "503-599"] + service = "error" + query = "/50x.html" + +[services] + [services.service1.loadbalancer] + passHostHeader = true + [[services.service1.loadbalancer.servers]] + url = "http://{{.Server1}}:8989474" + weight = 1 + + [services.error.loadbalancer] + [[services.error.loadbalancer.servers]] + url = "http://{{.Server2}}:80" + weight = 1 diff --git a/integration/fixtures/error_pages/simple.toml b/integration/fixtures/error_pages/simple.toml index 372a8f874..5652deb0b 100644 --- a/integration/fixtures/error_pages/simple.toml +++ b/integration/fixtures/error_pages/simple.toml @@ -7,24 +7,29 @@ logLevel = "DEBUG" address = ":8080" [file] -[backends] - [backends.backend1] - [backends.backend1.servers.server1] - url = "http://{{.Server1}}:80" - weight = 1 - [backends.error] - [backends.error.servers.error] - url = "http://{{.Server2}}:80" - weight = 1 -[frontends] - [frontends.frontend1] - passHostHeader = true - backend = "backend1" - [frontends.frontend1.routes.test_1] - rule = "Host:test.local" - [frontends.frontend1.errors] - [frontends.frontend1.errors.networks] - status = ["500-502", "503-599"] - backend = "error" - query = "/50x.html" +[routers] + [routers.router1] + middlewares = ["error"] + service = "service1" + + [routers.router1.routes.test_1] + rule = "Host:test.local" + +[middlewares] + [middlewares.error.errors] + status = ["500-502", "503-599"] + service = "error" + query = "/50x.html" + +[services] + [services.service1.loadbalancer] + passHostHeader = true + [[services.service1.loadbalancer.servers]] + url = "http://{{.Server1}}:80" + weight = 1 + + [services.error.loadbalancer] + [[services.error.loadbalancer.servers]] + url = "http://{{.Server2}}:80" + weight = 1 diff --git a/integration/fixtures/file/dir/simple1.toml b/integration/fixtures/file/dir/simple1.toml index 0de06d25c..7cb9249b0 100644 --- a/integration/fixtures/file/dir/simple1.toml +++ b/integration/fixtures/file/dir/simple1.toml @@ -1,12 +1,10 @@ -# rules -[backends] - [backends.backend1] - [backends.backend1.servers.server1] - url = "http://172.17.0.2:80" - weight = 1 - -[frontends] - [frontends.frontend1] - backend = "backend1" - [frontends.frontend1.routes.test_1] +[routers] + [routers.router1] rule = "Path:/test1" + service = "service1" + +[services] + [services.service1.loadbalancer] + [[services.service1.loadbalancer.servers]] + url = "http://172.17.0.2:80" + weight = 1 diff --git a/integration/fixtures/file/dir/simple2.toml b/integration/fixtures/file/dir/simple2.toml index dcbcffc57..53177b1d6 100644 --- a/integration/fixtures/file/dir/simple2.toml +++ b/integration/fixtures/file/dir/simple2.toml @@ -1,12 +1,10 @@ -# rules 
-[backends] - [backends.backend2] - [backends.backend2.servers.server1] - url = "http://172.17.0.123:80" - weight = 1 - -[frontends] - [frontends.frontend2] - backend = "backend2" - [frontends.frontend2.routes.test_2] +[routers] + [routers.router2] rule = "Path:/test2" + service = "service2" + +[services] + [services.service2.loadbalancer] + [[services.service2.loadbalancer.servers]] + url = "http://172.17.0.123:80" + weight = 1 diff --git a/integration/fixtures/file/simple.toml b/integration/fixtures/file/simple.toml index e9cb7cc67..bbc095a3d 100644 --- a/integration/fixtures/file/simple.toml +++ b/integration/fixtures/file/simple.toml @@ -2,39 +2,41 @@ defaultEntryPoints = ["http"] [entryPoints] [entryPoints.http] - address = ":8000" + address = ":8000" logLevel = "DEBUG" [file] -# rules -[backends] - [backends.backend1] - [backends.backend1.circuitbreaker] - expression = "NetworkErrorRatio() > 0.5" - [backends.backend1.servers.server1] - url = "http://172.17.0.2:80" - weight = 10 - [backends.backend1.servers.server2] - url = "http://172.17.0.3:80" - weight = 1 - [backends.backend2] - [backends.backend2.LoadBalancer] - method = "drr" - [backends.backend2.servers.server1] - url = "http://172.17.0.4:80" - weight = 1 - [backends.backend2.servers.server2] - url = "http://172.17.0.5:80" - weight = 2 - -[frontends] - [frontends.frontend1] - backend = "backend2" - [frontends.frontend1.routes.test_1] +[routers] + [routers.router1] rule = "Host:test.localhost" - [frontends.frontend2] - backend = "backend1" - [frontends.frontend2.routes.test_2] + service = "service2" + + [routers.router2] rule = "Path:/test" + middlewares = ["circuitbreaker"] + service = "service1" + +[middlewares] + [middlewares.circuitbreaker.circuitbreaker] + expression = "NetworkErrorRatio() > 0.5" + +[services] + [services.service1.loadbalancer] + [[services.service1.loadbalancer.servers]] + url = "http://172.17.0.2:80" + weight = 10 + [[services.service1.loadbalancer.servers]] + url = "http://172.17.0.3:80" + weight = 1 + + [services.service2] + [services.service2.loadbalancer] + method = "drr" + [[services.service2.loadbalancer.servers]] + url = "http://172.17.0.4:80" + weight = 1 + [[services.service2.loadbalancer.servers]] + url = "http://172.17.0.5:80" + weight = 2 diff --git a/integration/fixtures/grpc/config.toml b/integration/fixtures/grpc/config.toml index 79729a042..c9217a90b 100644 --- a/integration/fixtures/grpc/config.toml +++ b/integration/fixtures/grpc/config.toml @@ -2,28 +2,27 @@ defaultEntryPoints = ["https"] rootCAs = [ """{{ .CertContent }}""" ] +debug = true + [entryPoints] [entryPoints.https] - address = ":4443" + address = ":4443" [entryPoints.https.tls] [[entryPoints.https.tls.certificates]] - certFile = """{{ .CertContent }}""" - keyFile = """{{ .KeyContent }}""" - + certFile = """{{ .CertContent }}""" + keyFile = """{{ .KeyContent }}""" [api] [file] -[backends] - [backends.backend1] - [backends.backend1.servers.server1] - url = "https://127.0.0.1:{{ .GRPCServerPort }}" - weight = 1 - - -[frontends] - [frontends.frontend1] - backend = "backend1" - [frontends.frontend1.routes.test_1] +[routers] + [routers.router1] rule = "Host:127.0.0.1" + service = "service1" + +[services] + [services.service1.loadbalancer] + [[services.service1.loadbalancer.servers]] + url = "https://127.0.0.1:{{ .GRPCServerPort }}" + weight = 1 diff --git a/integration/fixtures/grpc/config_h2c.toml b/integration/fixtures/grpc/config_h2c.toml index 7525185dc..f33ec3ce7 100644 --- a/integration/fixtures/grpc/config_h2c.toml +++ 
b/integration/fixtures/grpc/config_h2c.toml @@ -1,24 +1,22 @@ defaultEntryPoints = ["http"] -debug=true +debug = true [entryPoints] [entryPoints.http] - address = ":8081" + address = ":8081" [api] [file] -[backends] - [backends.backend1] - [backends.backend1.servers.server1] - url = "h2c://127.0.0.1:{{ .GRPCServerPort }}" - weight = 1 - - -[frontends] - [frontends.frontend1] - backend = "backend1" - [frontends.frontend1.routes.test_1] +[routers] + [routers.router1] rule = "Host:127.0.0.1" + service = "service1" + +[services] + [services.service1.loadbalancer] + [[services.service1.loadbalancer.servers]] + url = "h2c://127.0.0.1:{{ .GRPCServerPort }}" + weight = 1 diff --git a/integration/fixtures/grpc/config_h2c_termination.toml b/integration/fixtures/grpc/config_h2c_termination.toml index 41292acd3..4e3b8ef52 100644 --- a/integration/fixtures/grpc/config_h2c_termination.toml +++ b/integration/fixtures/grpc/config_h2c_termination.toml @@ -1,25 +1,26 @@ defaultEntryPoints = ["https"] +debug = true + [entryPoints] [entryPoints.https] - address = ":4443" + address = ":4443" [entryPoints.https.tls] [[entryPoints.https.tls.certificates]] - certFile = """{{ .CertContent }}""" - keyFile = """{{ .KeyContent }}""" + certFile = """{{ .CertContent }}""" + keyFile = """{{ .KeyContent }}""" + [api] [file] -[backends] - [backends.backend1] - [backends.backend1.servers.server1] +[routers] + [routers.router1] + rule = "Host:127.0.0.1" + service = "service1" + +[services] + [services.service1.loadbalancer] + [[services.service1.loadbalancer.servers]] url = "h2c://127.0.0.1:{{ .GRPCServerPort }}" weight = 1 - - -[frontends] - [frontends.frontend1] - backend = "backend1" - [frontends.frontend1.routes.test_1] - rule = "Host:127.0.0.1" diff --git a/integration/fixtures/grpc/config_insecure.toml b/integration/fixtures/grpc/config_insecure.toml index 0ff8a6c1b..87bc9ef46 100644 --- a/integration/fixtures/grpc/config_insecure.toml +++ b/integration/fixtures/grpc/config_insecure.toml @@ -2,28 +2,29 @@ defaultEntryPoints = ["https"] insecureSkipVerify = true +debug = true + [entryPoints] [entryPoints.https] - address = ":4443" + address = ":4443" [entryPoints.https.tls] [[entryPoints.https.tls.certificates]] - certFile = """{{ .CertContent }}""" - keyFile = """{{ .KeyContent }}""" + certFile = """{{ .CertContent }}""" + keyFile = """{{ .KeyContent }}""" [api] [file] -[backends] - [backends.backend1] - [backends.backend1.servers.server1] - url = "https://127.0.0.1:{{ .GRPCServerPort }}" - weight = 1 - - -[frontends] - [frontends.frontend1] - backend = "backend1" - [frontends.frontend1.routes.test_1] +[routers] + [routers.router1] rule = "Host:127.0.0.1" + service = "service1" + + +[services] + [services.service1.loadbalancer] + [[services.service1.loadbalancer.servers]] + url = "https://127.0.0.1:{{ .GRPCServerPort }}" + weight = 1 diff --git a/integration/fixtures/grpc/config_with_flush.toml b/integration/fixtures/grpc/config_with_flush.toml index 9615e3dd2..82e1ec286 100644 --- a/integration/fixtures/grpc/config_with_flush.toml +++ b/integration/fixtures/grpc/config_with_flush.toml @@ -15,17 +15,15 @@ rootCAs = [ """{{ .CertContent }}""" ] [file] -[backends] - [backends.backend1] - [backends.backend1.responseForwarding] - flushInterval="1ms" - [backends.backend1.servers.server1] - url = "https://127.0.0.1:{{ .GRPCServerPort }}" - weight = 1 - - -[frontends] - [frontends.frontend1] - backend = "backend1" - [frontends.frontend1.routes.test_1] +[routers] + [routers.router1] rule = "Host:127.0.0.1" + service = 
"service1" + +[services] + [services.service1.loadbalancer] + [services.service1.loadbalancer.responseForwarding] + flushInterval="1ms" + [[services.service1.loadbalancer.servers]] + url = "https://127.0.0.1:{{ .GRPCServerPort }}" + weight = 1 diff --git a/integration/fixtures/healthcheck/multiple-entrypoints-drr.toml b/integration/fixtures/healthcheck/multiple-entrypoints-drr.toml index 6f0c9cf72..3a8af810d 100644 --- a/integration/fixtures/healthcheck/multiple-entrypoints-drr.toml +++ b/integration/fixtures/healthcheck/multiple-entrypoints-drr.toml @@ -4,30 +4,28 @@ logLevel = "DEBUG" [entryPoints] [entryPoints.http1] - address = ":8000" + address = ":8000" [entryPoints.http2] - address = ":9000" + address = ":9000" [api] [file] -[backends] - [backends.backend1] - [backends.backend1.LoadBalancer] - method = "drr" - [backends.backend1.healthcheck] - path = "/health" - interval = "1s" - timeout = "0.9s" - [backends.backend1.servers.server1] - url = "http://{{.Server1}}:80" - weight = 1 - [backends.backend1.servers.server2] - url = "http://{{.Server2}}:80" - weight = 1 - -[frontends] - [frontends.frontend1] - backend = "backend1" - [frontends.frontend1.routes.test_1] +[routers] + [routers.router1] + service = "service1" rule = "Host:test.localhost" + +[services] + [services.service1.loadbalancer] + method = "drr" + [services.service1.loadbalancer.healthcheck] + path = "/health" + interval = "1s" + timeout = "0.9s" + [[services.service1.loadbalancer.servers]] + url = "http://{{.Server1}}:80" + weight = 1 + [[services.service1.loadbalancer.servers]] + url = "http://{{.Server2}}:80" + weight = 1 diff --git a/integration/fixtures/healthcheck/multiple-entrypoints-wrr.toml b/integration/fixtures/healthcheck/multiple-entrypoints-wrr.toml index 9fba423b4..0fa828a1d 100644 --- a/integration/fixtures/healthcheck/multiple-entrypoints-wrr.toml +++ b/integration/fixtures/healthcheck/multiple-entrypoints-wrr.toml @@ -4,30 +4,29 @@ logLevel = "DEBUG" [entryPoints] [entryPoints.http1] - address = ":8000" + address = ":8000" [entryPoints.http2] - address = ":9000" + address = ":9000" [api] [file] -[backends] - [backends.backend1] - [backends.backend1.LoadBalancer] - method = "wrr" - [backends.backend1.healthcheck] - path = "/health" - interval = "1s" - timeout = "0.9s" - [backends.backend1.servers.server1] - url = "http://{{.Server1}}:80" - weight = 1 - [backends.backend1.servers.server2] - url = "http://{{.Server2}}:80" - weight = 1 -[frontends] - [frontends.frontend1] - backend = "backend1" - [frontends.frontend1.routes.test_1] +[routers] + [routers.router1] + service = "service1" rule = "Host:test.localhost" + +[services] + [services.service1.loadbalancer] + method = "wrr" + [services.service1.loadbalancer.healthcheck] + path = "/health" + interval = "1s" + timeout = "0.9s" + [[services.service1.loadbalancer.servers]] + url = "http://{{.Server1}}:80" + weight = 1 + [[services.service1.loadbalancer.servers]] + url = "http://{{.Server2}}:80" + weight = 1 diff --git a/integration/fixtures/healthcheck/port_overload.toml b/integration/fixtures/healthcheck/port_overload.toml index 45fcf8feb..8aa104afd 100644 --- a/integration/fixtures/healthcheck/port_overload.toml +++ b/integration/fixtures/healthcheck/port_overload.toml @@ -4,24 +4,25 @@ logLevel = "DEBUG" [entryPoints] [entryPoints.http] - address = ":8000" + address = ":8000" [api] [file] -[backends] - [backends.backend1] - [backends.backend1.healthcheck] - path = "/health" - port = 80 - interval = "1s" - timeout = "0.9s" - 
[backends.backend1.servers.server1] - url = "http://{{.Server1}}:81" - weight = 1 -[frontends] - [frontends.frontend1] - backend = "backend1" - [frontends.frontend1.routes.test_1] +[routers] + [routers.router1] + service = "service1" rule = "Host:test.localhost" + +[services] + [services.service1.loadbalancer] + method = "drr" + [services.service1.loadbalancer.healthcheck] + path = "/health" + port = 80 + interval = "1s" + timeout = "0.9s" + [[services.service1.loadbalancer.servers]] + url = "http://{{.Server1}}:81" + weight = 1 diff --git a/integration/fixtures/healthcheck/simple.toml b/integration/fixtures/healthcheck/simple.toml index ddd47e030..77108efb5 100644 --- a/integration/fixtures/healthcheck/simple.toml +++ b/integration/fixtures/healthcheck/simple.toml @@ -4,26 +4,26 @@ logLevel = "DEBUG" [entryPoints] [entryPoints.http] - address = ":8000" + address = ":8000" [api] [file] -[backends] - [backends.backend1] - [backends.backend1.healthcheck] - path = "/health" - interval = "1s" - timeout = "0.9s" - [backends.backend1.servers.server1] - url = "http://{{.Server1}}:80" - weight = 1 - [backends.backend1.servers.server2] - url = "http://{{.Server2}}:80" - weight = 1 -[frontends] - [frontends.frontend1] - backend = "backend1" - [frontends.frontend1.routes.test_1] +[routers] + [routers.router1] + service = "service1" rule = "Host:test.localhost" + +[services] + [services.service1.loadbalancer] + [services.service1.loadbalancer.healthcheck] + path = "/health" + interval = "1s" + timeout = "0.9s" + [[services.service1.loadbalancer.servers]] + url = "http://{{.Server1}}:80" + weight = 1 + [[services.service1.loadbalancer.servers]] + url = "http://{{.Server2}}:80" + weight = 1 diff --git a/integration/fixtures/https/clientca/https_1ca1config.toml b/integration/fixtures/https/clientca/https_1ca1config.toml index 3030fea31..b5215e302 100644 --- a/integration/fixtures/https/clientca/https_1ca1config.toml +++ b/integration/fixtures/https/clientca/https_1ca1config.toml @@ -20,22 +20,24 @@ defaultEntryPoints = ["https"] [file] -[backends] - [backends.backend1] - [backends.backend1.servers.server1] - url = "http://127.0.0.1:9010" - weight = 1 - [backends.backend2] - [backends.backend2.servers.server1] - url = "http://127.0.0.1:9020" - weight = 1 +[Routers] + [Routers.router1] + Service = "service1" + Rule = "Host:snitest.com" + [Routers.router2] + Service = "service2" + Rule = "Host:snitest.org" -[frontends] - [frontends.frontend1] - backend = "backend1" - [frontends.frontend1.routes.test_1] - rule = "Host:snitest.com" - [frontends.frontend2] - backend = "backend2" - [frontends.frontend2.routes.test_2] - rule = "Host:snitest.org" +[Services] + [Services.service1] + [Services.service1.LoadBalancer] + + [[Services.service1.LoadBalancer.Servers]] + URL = "http://127.0.0.1:9010" + Weight = 1 + [Services.service2] + [Services.service2.LoadBalancer] + + [[Services.service2.LoadBalancer.Servers]] + URL = "http://127.0.0.1:9020" + Weight = 1 diff --git a/integration/fixtures/https/clientca/https_2ca1config.toml b/integration/fixtures/https/clientca/https_2ca1config.toml index 96abe1ada..4331463e6 100644 --- a/integration/fixtures/https/clientca/https_2ca1config.toml +++ b/integration/fixtures/https/clientca/https_2ca1config.toml @@ -19,22 +19,24 @@ defaultEntryPoints = ["https"] [file] -[backends] - [backends.backend1] - [backends.backend1.servers.server1] - url = "http://127.0.0.1:9010" - weight = 1 - [backends.backend2] - [backends.backend2.servers.server1] - url = "http://127.0.0.1:9020" - weight = 1 
+[Routers] + [Routers.router1] + Service = "service1" + Rule = "Host:snitest.com" + [Routers.router2] + Service = "service2" + Rule = "Host:snitest.org" -[frontends] - [frontends.frontend1] - backend = "backend1" - [frontends.frontend1.routes.test_1] - rule = "Host:snitest.com" - [frontends.frontend2] - backend = "backend2" - [frontends.frontend2.routes.test_2] - rule = "Host:snitest.org" +[Services] + [Services.service1] + [Services.service1.LoadBalancer] + + [[Services.service1.LoadBalancer.Servers]] + URL = "http://127.0.0.1:9010" + Weight = 1 + [Services.service2] + [Services.service2.LoadBalancer] + + [[Services.service2.LoadBalancer.Servers]] + URL = "http://127.0.0.1:9020" + Weight = 1 diff --git a/integration/fixtures/https/clientca/https_2ca2config.toml b/integration/fixtures/https/clientca/https_2ca2config.toml index 9f4541965..af654c056 100644 --- a/integration/fixtures/https/clientca/https_2ca2config.toml +++ b/integration/fixtures/https/clientca/https_2ca2config.toml @@ -19,23 +19,24 @@ defaultEntryPoints = ["https"] [api] [file] +[Routers] + [Routers.router1] + Service = "service1" + Rule = "Host:snitest.com" + [Routers.router2] + Service = "service2" + Rule = "Host:snitest.org" -[backends] - [backends.backend1] - [backends.backend1.servers.server1] - url = "http://127.0.0.1:9010" - weight = 1 - [backends.backend2] - [backends.backend2.servers.server1] - url = "http://127.0.0.1:9020" - weight = 1 +[Services] + [Services.service1] + [Services.service1.LoadBalancer] -[frontends] - [frontends.frontend1] - backend = "backend1" - [frontends.frontend1.routes.test_1] - rule = "Host:snitest.com" - [frontends.frontend2] - backend = "backend2" - [frontends.frontend2.routes.test_2] - rule = "Host:snitest.org" + [[Services.service1.LoadBalancer.Servers]] + URL = "http://127.0.0.1:9010" + Weight = 1 + [Services.service2] + [Services.service2.LoadBalancer] + + [[Services.service2.LoadBalancer.Servers]] + URL = "http://127.0.0.1:9020" + Weight = 1 diff --git a/integration/fixtures/https/dynamic_https.toml b/integration/fixtures/https/dynamic_https.toml index 264552b02..c1942f519 100644 --- a/integration/fixtures/https/dynamic_https.toml +++ b/integration/fixtures/https/dynamic_https.toml @@ -1,22 +1,24 @@ -[backends] - [backends.backend1] - [backends.backend1.servers.server1] - url = "http://127.0.0.1:9010" - weight = 1 - [backends.backend2] - [backends.backend2.servers.server1] - url = "http://127.0.0.1:9020" - weight = 1 +[Routers] + [Routers.router1] + Service = "service1" + Rule = "Host:snitest.com" + [Routers.router2] + Service = "service2" + Rule = "Host:snitest.org" -[frontends] - [frontends.frontend1] - backend = "backend1" - [frontends.frontend1.routes.test_1] - rule = "Host:snitest.com" - [frontends.frontend2] - backend = "backend2" - [frontends.frontend2.routes.test_2] - rule = "Host:snitest.org" +[Services] + [Services.service1] + [Services.service1.LoadBalancer] + + [[Services.service1.LoadBalancer.Servers]] + URL = "http://127.0.0.1:9010" + Weight = 1 + [Services.service2] + [Services.service2.LoadBalancer] + + [[Services.service2.LoadBalancer.Servers]] + URL = "http://127.0.0.1:9020" + Weight = 1 [[tls]] entryPoints = ["https"] diff --git a/integration/fixtures/https/dynamic_https_sni.toml b/integration/fixtures/https/dynamic_https_sni.toml index d7a023e6d..9a91e80d0 100644 --- a/integration/fixtures/https/dynamic_https_sni.toml +++ b/integration/fixtures/https/dynamic_https_sni.toml @@ -6,6 +6,7 @@ defaultEntryPoints = ["https"] [entryPoints.https] address = ":4443" 
[entryPoints.https.tls] + [entryPoints.https02] address = ":8443" [entryPoints.https02.tls] @@ -15,4 +16,4 @@ defaultEntryPoints = ["https"] [file] fileName = "{{.DynamicConfFileName}}" -watch = true \ No newline at end of file +watch = true diff --git a/integration/fixtures/https/dynamic_https_sni_default_cert.toml b/integration/fixtures/https/dynamic_https_sni_default_cert.toml index b0e9a98cc..1ccf4d969 100644 --- a/integration/fixtures/https/dynamic_https_sni_default_cert.toml +++ b/integration/fixtures/https/dynamic_https_sni_default_cert.toml @@ -14,21 +14,21 @@ defaultEntryPoints = ["https"] [file] -[backends] - [backends.backend1] - [backends.backend1.servers.server1] - url = "http://127.0.0.1:9010" - weight = 1 +[Routers] + [Routers.router1] + Service = "service1" + Rule = "Host:snitest.com" + [Routers.router2] + Service = "service1" + Rule = "Host:www.snitest.com" -[frontends] - [frontends.frontend1] - backend = "backend1" - [frontends.frontend1.routes.test_1] - rule = "Host:snitest.com" - [frontends.frontend2] - backend = "backend1" - [frontends.frontend2.routes.test_1] - rule = "Host:www.snitest.com" +[Services] + [Services.service1] + [Services.service1.LoadBalancer] + + [[Services.service1.LoadBalancer.Servers]] + URL = "http://127.0.0.1:9010" + Weight = 1 [[tls]] entryPoints = ["https"] diff --git a/integration/fixtures/https/https_redirect.toml b/integration/fixtures/https/https_redirect.toml index fc0cdf67c..e9ab70847 100644 --- a/integration/fixtures/https/https_redirect.toml +++ b/integration/fixtures/https/https_redirect.toml @@ -5,8 +5,10 @@ defaultEntryPoints = ["http", "https"] [entryPoints] [entryPoints.http] address = ":8888" + [entryPoints.http.redirect] entryPoint = "https" + [entryPoints.https] address = ":8443" [entryPoints.https.tls] @@ -15,56 +17,88 @@ defaultEntryPoints = ["http", "https"] [file] -[backends] - [backends.backend1] - [backends.backend1.servers.server1] - url = "http://127.0.0.1:80" - weight = 1 +[Routers] + [Routers.router1] + Service = "service1" + Middlewares = ["redirect-https"] + Rule = "Host: example.com" -[frontends] + [Routers.router2] + Service = "service1" + Middlewares = ["redirect-https", "api-slash-strip"] + Rule = "Host: example2.com" - [frontends.frontend1] - backend = "backend1" - [frontends.frontend1.routes.test_1] - rule = "Host: example.com; PathPrefixStrip: /api" - [frontends.frontend2] - backend = "backend1" - [frontends.frontend2.routes.test_1] - rule = "Host: example2.com; PathPrefixStrip: /api/" + [Routers.router3] + Service = "service1" + Middlewares = ["redirect-https", "foo-add-prefix"] + Rule = "Host: test.com" - [frontends.frontend3] - backend = "backend1" - [frontends.frontend3.routes.test_1] - rule = "Host: test.com; AddPrefix: /foo" - [frontends.frontend4] - backend = "backend1" - [frontends.frontend4.routes.test_1] - rule = "Host: test2.com; AddPrefix: /foo/" + [Routers.router4] + Service = "service1" + Middlewares = ["redirect-https", "foo-slash-add-prefix"] + Rule = "Host: test2.com" - [frontends.frontend5] - backend = "backend1" - [frontends.frontend5.routes.test_1] - rule = "Host: foo.com; PathPrefixStripRegex: /{id:[a-z]+}" - [frontends.frontend6] - backend = "backend1" - [frontends.frontend6.routes.test_1] - rule = "Host: foo2.com; PathPrefixStripRegex: /{id:[a-z]+}/" + [Routers.router5] + Service = "service1" + Middlewares = ["redirect-https", "id-strip-regex-prefix"] + Rule = "Host: foo.com" - [frontends.frontend7] - backend = "backend1" - [frontends.frontend7.routes.test_1] - rule = "Host: bar.com; 
ReplacePathRegex: /api /" - [frontends.frontend8] - backend = "backend1" - [frontends.frontend8.routes.test_1] - rule = "Host: bar2.com; ReplacePathRegex: /api/ /" + [Routers.router6] + Service = "service1" + Middlewares = ["redirect-https", "id-slash-strip-regex-prefix"] + Rule = "Host: foo2.com" - [frontends.frontend9] - backend = "backend1" - [frontends.frontend9.routes.test_1] - rule = "Host: pow.com; ReplacePath: /api" - [frontends.frontend10] - backend = "backend1" - [frontends.frontend10.routes.test_1] - rule = "Host: pow2.com; ReplacePath: /api/" + [Routers.router7] + Service = "service1" + Middlewares = ["redirect-https", "api-regex-replace"] + Rule = "Host: bar.com" + [Routers.router8] + Service = "service1" + Middlewares = ["redirect-https", "api-slash-regex-replace"] + Rule = "Host: bar2.com" + + [Routers.router9] + Service = "service1" + Middlewares = ["redirect-https", "api-replace-path"] + Rule = "Host: pow.com" + + [Routers.router10] + Service = "service1" + Middlewares = ["redirect-https", "api-slash-replace-path"] + Rule = "Host: pow2.com" + +[Middlewares] + [Middlewares.api-strip.StripPrefix] + prefixes = ["/api"] + [Middlewares.api-slash-strip.StripPrefix] + prefixes = ["/api/"] + [Middlewares.foo-add-prefix.AddPrefix] + prefix = "/foo" + [Middlewares.foo-slash-add-prefix.AddPrefix] + prefix = "/foo/" + [Middlewares.id-strip-regex-prefix.StripPrefixRegex] + regex = ["/{id:[a-z]+}"] + [Middlewares.id-slash-strip-regex-prefix.StripPrefixRegex] + regex = ["/{id:[a-z]+}/"] + [Middlewares.api-regex-replace.ReplacePathRegex] + regex = "/api" + replacement = "/" + [Middlewares.api-slash-regex-replace.ReplacePathRegex] + regex = "/api/" + replacement = "/" + [Middlewares.api-replace-path.ReplacePath] + path = "/api" + [Middlewares.api-slash-replace-path.ReplacePath] + path = "/api/" + [Middlewares.redirect-https.redirect] + regex = "^(?:https?://)?([\\w\\._-]+)(?::\\d+)?(.*)$" + replacement = "https://${1}:8443${2}" + +[Services] + [Services.service1] + [Services.service1.LoadBalancer] + + [[Services.service1.LoadBalancer.Servers]] + URL = "http://127.0.0.1:80" + Weight = 1 diff --git a/integration/fixtures/https/https_sni.toml b/integration/fixtures/https/https_sni.toml index e840259b0..0b4108955 100644 --- a/integration/fixtures/https/https_sni.toml +++ b/integration/fixtures/https/https_sni.toml @@ -17,22 +17,24 @@ defaultEntryPoints = ["https"] [file] -[backends] - [backends.backend1] - [backends.backend1.servers.server1] - url = "http://127.0.0.1:9010" - weight = 1 - [backends.backend2] - [backends.backend2.servers.server1] - url = "http://127.0.0.1:9020" - weight = 1 +[Routers] + [Routers.router1] + Service = "service1" + Rule = "Host:snitest.com" + [Routers.router2] + Service = "service2" + Rule = "Host:snitest.org" -[frontends] - [frontends.frontend1] - backend = "backend1" - [frontends.frontend1.routes.test_1] - rule = "Host:snitest.com" - [frontends.frontend2] - backend = "backend2" - [frontends.frontend2.routes.test_2] - rule = "Host:snitest.org" +[Services] + [Services.service1] + [Services.service1.LoadBalancer] + + [[Services.service1.LoadBalancer.Servers]] + URL = "http://127.0.0.1:9010" + Weight = 1 + [Services.service2] + [Services.service2.LoadBalancer] + + [[Services.service2.LoadBalancer.Servers]] + URL = "http://127.0.0.1:9020" + Weight = 1 diff --git a/integration/fixtures/https/https_sni_default_cert.toml b/integration/fixtures/https/https_sni_default_cert.toml index 17430aa6c..df5c24202 100644 --- a/integration/fixtures/https/https_sni_default_cert.toml 
+++ b/integration/fixtures/https/https_sni_default_cert.toml @@ -20,18 +20,18 @@ defaultEntryPoints = ["https"] [file] -[backends] - [backends.backend1] - [backends.backend1.servers.server1] - url = "http://127.0.0.1:9010" - weight = 1 +[Routers] + [Routers.router1] + Service = "service1" + Rule = "Host:snitest.com" + [Routers.router2] + Service = "service1" + Rule = "Host:www.snitest.com" -[frontends] - [frontends.frontend1] - backend = "backend1" - [frontends.frontend1.routes.test_1] - rule = "Host:snitest.com" - [frontends.frontend2] - backend = "backend1" - [frontends.frontend2.routes.test_1] - rule = "Host:www.snitest.com" +[Services] + [Services.service1] + [Services.service1.LoadBalancer] + + [[Services.service1.LoadBalancer.Servers]] + URL = "http://127.0.0.1:9010" + Weight = 1 diff --git a/integration/fixtures/https/https_sni_strict.toml b/integration/fixtures/https/https_sni_strict.toml index 068bc8eca..5aba7ac3b 100644 --- a/integration/fixtures/https/https_sni_strict.toml +++ b/integration/fixtures/https/https_sni_strict.toml @@ -15,14 +15,15 @@ defaultEntryPoints = ["https"] [file] -[backends] - [backends.backend1] - [backends.backend1.servers.server1] - url = "http://127.0.0.1:9010" - weight = 1 +[Routers] + [Routers.router1] + Service = "service1" + Rule = "Host:snitest.com" -[frontends] - [frontends.frontend1] - backend = "backend1" - [frontends.frontend1.routes.test_1] - rule = "Host:snitest.com" +[Services] + [Services.service1] + [Services.service1.LoadBalancer] + + [[Services.service1.LoadBalancer.Servers]] + URL = "http://127.0.0.1:9010" + Weight = 1 diff --git a/integration/fixtures/https/rootcas/https.toml b/integration/fixtures/https/rootcas/https.toml index e29329249..630240596 100644 --- a/integration/fixtures/https/rootcas/https.toml +++ b/integration/fixtures/https/rootcas/https.toml @@ -28,14 +28,15 @@ fblo6RBxUQ== [file] -[backends] - [backends.backend1] - [backends.backend1.servers.server1] - url = "{{ .BackendHost }}" - weight = 1 +[Routers] + [Routers.router1] + Service = "service1" + Rule = "Path: /ping" -[frontends] - [frontends.frontend1] - backend = "backend1" - [frontends.frontend1.routes.test_1] - rule = "Path: /ping" +[Services] + [Services.service1] + [Services.service1.LoadBalancer] + + [[Services.service1.LoadBalancer.Servers]] + URL = "{{ .BackendHost }}" + Weight = 1 diff --git a/integration/fixtures/https/rootcas/https_with_file.toml b/integration/fixtures/https/rootcas/https_with_file.toml index cd26a49b4..80bfa3d01 100644 --- a/integration/fixtures/https/rootcas/https_with_file.toml +++ b/integration/fixtures/https/rootcas/https_with_file.toml @@ -13,14 +13,15 @@ rootCAs = [ "fixtures/https/rootcas/local.crt"] [file] -[backends] - [backends.backend1] - [backends.backend1.servers.server1] - url = "{{ .BackendHost }}" - weight = 1 +[Routers] + [Routers.router1] + Service = "service1" + Rule = "Path: /ping" -[frontends] - [frontends.frontend1] - backend = "backend1" - [frontends.frontend1.routes.test_1] - rule = "Path: /ping" +[Services] + [Services.service1] + [Services.service1.LoadBalancer] + + [[Services.service1.LoadBalancer.Servers]] + URL = "{{ .BackendHost }}" + Weight = 1 diff --git a/integration/fixtures/log_rotation_config.toml b/integration/fixtures/log_rotation_config.toml index c24dce4a9..e68421ddc 100644 --- a/integration/fixtures/log_rotation_config.toml +++ b/integration/fixtures/log_rotation_config.toml @@ -27,14 +27,15 @@ entryPoint = "api" ################################################################ # rules 
################################################################ -[backends] - [backends.backend1] - [backends.backend1.servers.server1] - url = "http://127.0.0.1:8081" - weight = 1 +[Routers] + [Routers.router1] + Service = "service1" + Rule = "Path: /test1" -[frontends] - [frontends.frontend1] - backend = "backend1" - [frontends.frontend1.routes.test_1] - rule = "Path: /test1" +[Services] + [Services.service1] + [Services.service1.LoadBalancer] + + [[Services.service1.LoadBalancer.Servers]] + URL = "http://127.0.0.1:8081" + Weight = 1 diff --git a/integration/fixtures/multiple_provider.toml b/integration/fixtures/multiple_provider.toml index 1c89a1017..0950114fd 100644 --- a/integration/fixtures/multiple_provider.toml +++ b/integration/fixtures/multiple_provider.toml @@ -14,14 +14,15 @@ watch = true exposedByDefault = false [file] - [frontends] - [frontends.frontend-1] - backend = "backend-test" - [frontends.frontend-1.routes.test_1] - rule = "PathPrefix:/file" +[Routers] + [Routers.router-1] + Service = "service-test" + Rule = "PathPrefix:/file" - [backends] - [backends.backend-test] - [backends.backend-test.servers.website] - url = "http://{{ .IP }}" - weight = 1 +[Services] + [Services.service-test] + [Services.service-test.LoadBalancer] + + [[Services.service-test.LoadBalancer.Servers]] + URL = "http://{{ .IP }}" + Weight = 1 diff --git a/integration/fixtures/proxy-protocol/with.toml b/integration/fixtures/proxy-protocol/with.toml index f5a1ef57a..ed0b43204 100644 --- a/integration/fixtures/proxy-protocol/with.toml +++ b/integration/fixtures/proxy-protocol/with.toml @@ -3,22 +3,23 @@ defaultEntryPoints = ["http"] [entryPoints] [entryPoints.http] - address = ":8000" + address = ":8000" [entryPoints.http.proxyProtocol] - trustedIPs = ["{{.HaproxyIP}}"] + trustedIPs = ["{{.HaproxyIP}}"] [api] [file] -[backends] - [backends.backend1] - [backends.backend1.servers.server1] - url = "http://{{.WhoamiIP}}" - weight = 1 +[Routers] + [Routers.router1] + Service = "service1" + Rule = "Path:/whoami" -[frontends] - [frontends.frontend1] - backend = "backend1" - [frontends.frontend1.routes.test_1] - rule = "Path:/whoami" +[Services] + [Services.service1] + [Services.service1.LoadBalancer] + + [[Services.service1.LoadBalancer.Servers]] + URL = "http://{{.WhoamiIP}}" + Weight = 1 diff --git a/integration/fixtures/proxy-protocol/without.toml b/integration/fixtures/proxy-protocol/without.toml index 666c441a4..ebbe2277b 100644 --- a/integration/fixtures/proxy-protocol/without.toml +++ b/integration/fixtures/proxy-protocol/without.toml @@ -3,22 +3,22 @@ defaultEntryPoints = ["http"] [entryPoints] [entryPoints.http] - address = ":8000" + address = ":8000" [entryPoints.http.proxyProtocol] - trustedIPs = ["1.2.3.4"] + trustedIPs = ["1.2.3.4"] [api] [file] +[Routers] + [Routers.router1] + Service = "service1" + Rule = "Path:/whoami" -[backends] - [backends.backend1] - [backends.backend1.servers.server1] - url = "http://{{.WhoamiIP}}" - weight = 1 +[Services] + [Services.service1] + [Services.service1.LoadBalancer] -[frontends] - [frontends.frontend1] - backend = "backend1" - [frontends.frontend1.routes.test_1] - rule = "Path:/whoami" + [[Services.service1.LoadBalancer.Servers]] + URL = "http://{{.WhoamiIP}}" + Weight = 1 diff --git a/integration/fixtures/ratelimit/simple.toml b/integration/fixtures/ratelimit/simple.toml index 99d3525f6..8d367ccf9 100644 --- a/integration/fixtures/ratelimit/simple.toml +++ b/integration/fixtures/ratelimit/simple.toml @@ -8,25 +8,29 @@ logLevel = "DEBUG" [file] -[backends] - 
[backends.backend1] - [backends.backend1.servers.server1] - url = "http://{{.Server1}}:80" - weight = 1 -[frontends] - [frontends.frontend1] - passHostHeader = true - backend = "backend1" - [frontends.frontend1.routes.test_1] - rule = "Path:/" - [frontends.frontend1.ratelimit] +[Routers] + [Routers.router1] + Service = "service1" + Middlewares = [ "ratelimit" ] + Rule = "Path:/" + +[Middlewares] + [Middlewares.ratelimit.RateLimit] extractorfunc = "client.ip" - [frontends.frontend1.ratelimit.rateset.rateset1] + [Middlewares.ratelimit.RateLimit.rateset.rateset1] period = "60s" average = 4 burst = 5 - [frontends.frontend1.ratelimit.rateset.rateset2] + [Middlewares.ratelimit.RateLimit.rateset.rateset2] period = "3s" average = 1 burst = 2 + +[Services] + [Services.service1] + [Services.service1.LoadBalancer] + passHostHeader = true + [[Services.service1.LoadBalancer.Servers]] + URL = "http://{{.Server1}}:80" + Weight = 1 diff --git a/integration/fixtures/reqacceptgrace.toml b/integration/fixtures/reqacceptgrace.toml index 67df4326c..985e00a5c 100644 --- a/integration/fixtures/reqacceptgrace.toml +++ b/integration/fixtures/reqacceptgrace.toml @@ -13,16 +13,17 @@ logLevel = "DEBUG" requestAcceptGraceTimeout = "10s" [file] -[backends] - [backends.backend] - [backends.backend.servers.server] - url = "{{.Server}}" - weight = 1 +[Routers] + [Routers.router] + Service = "service" + Rule = "Path:/service" -[frontends] - [frontends.frontend] - backend = "backend" - [frontends.frontend.routes.service] - rule = "Path:/service" +[Services] + [Services.service] + [Services.service.LoadBalancer] + + [[Services.service.LoadBalancer.Servers]] + URL = "{{.Server}}" + Weight = 1 [ping] diff --git a/integration/fixtures/retry/simple.toml b/integration/fixtures/retry/simple.toml index 4bdd52552..c09e66a6a 100644 --- a/integration/fixtures/retry/simple.toml +++ b/integration/fixtures/retry/simple.toml @@ -8,20 +8,25 @@ logLevel = "DEBUG" [api] -[retry] - [file] -[backends] - [backends.backend1] - [backends.backend1.servers.server1] - url = "http://{{.WhoamiEndpoint}}:8080" # not valid - weight = 1 - [backends.backend1.servers.server2] - url = "http://{{.WhoamiEndpoint}}:80" - weight = 1 -[frontends] - [frontends.frontend1] - backend = "backend1" - [frontends.frontend1.routes.test_1] - rule = "PathPrefix:/" +[Routers] + [Routers.router1] + Service = "service1" + Middlewares = [ "retry" ] + Rule = "PathPrefix:/" + +[Middlewares.retry.Retry] +Attempts = 3 + +[Services] + [Services.service1] + [Services.service1.LoadBalancer] + + [[Services.service1.LoadBalancer.Servers]] + URL = "http://{{.WhoamiEndpoint}}:8080" + Weight = 1 + + [[Services.service1.LoadBalancer.Servers]] + URL = "http://{{.WhoamiEndpoint}}:80" + Weight = 1 diff --git a/integration/fixtures/simple_auth.toml b/integration/fixtures/simple_auth.toml index 4131e42af..ec8ce8661 100644 --- a/integration/fixtures/simple_auth.toml +++ b/integration/fixtures/simple_auth.toml @@ -7,10 +7,12 @@ defaultEntryPoints = ["http"] [entryPoints.traefik] address = ":8001" - [entryPoints.traefik.auth.basic] - users = ["test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/", "test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"] [api] + middlewares = ["authentication"] + +[middleware.authentication.basic-auth] + users = ["test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/", "test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"] [ping] diff --git a/integration/fixtures/simple_stats.toml b/integration/fixtures/simple_stats.toml index d749fa4c4..c003f4cfb 100644 --- a/integration/fixtures/simple_stats.toml +++ 
b/integration/fixtures/simple_stats.toml @@ -8,26 +8,26 @@ debug=true [file] - [backends] - [backends.backend1] - [backends.backend1.servers.server1] - url = "{{ .Server1 }}" - weight = 1 +[Routers] + [Routers.router1] + EntryPoints = ["http"] + Service = "service1" + Rule = "PathPrefix:/whoami" - [backends.backend2] - [backends.backend2.servers.server1] - url = "{{ .Server2 }}" - weight = 1 + [Routers.router2] + EntryPoints = ["traefik"] + Service = "service2" + Rule = "PathPrefix:/whoami" - [frontends] - [frontends.frontend1] - entrypoints=["http"] - backend = "backend1" - [frontends.frontend1.routes.test_1] - rule = "PathPrefix:/whoami" +[Services] + [Services.service1] + [Services.service1.LoadBalancer] + [[Services.service1.LoadBalancer.Servers]] + URL = "{{ .Server1 }}" + Weight = 1 - [frontends.frontend2] - backend = "backend2" - entrypoints=["traefik"] - [frontends.frontend2.routes.test_1] - rule = "PathPrefix:/whoami" + [Services.service2] + [Services.service2.LoadBalancer] + [[Services.service2.LoadBalancer.Servers]] + URL = "{{ .Server2 }}" + Weight = 1 diff --git a/integration/fixtures/timeout/forwarding_timeouts.toml b/integration/fixtures/timeout/forwarding_timeouts.toml index 73b3673e9..768bde1d3 100644 --- a/integration/fixtures/timeout/forwarding_timeouts.toml +++ b/integration/fixtures/timeout/forwarding_timeouts.toml @@ -3,37 +3,37 @@ defaultEntryPoints = ["http"] [entryPoints] [entryPoints.http] - address = ":8000" + address = ":8000" [accessLog] -format = "json" + format = "json" [api] [forwardingTimeouts] -dialTimeout = "300ms" -responseHeaderTimeout = "300ms" + dialTimeout = "300ms" + responseHeaderTimeout = "300ms" [file] -[backends] - [backends.backend1] - [backends.backend1.servers.server1] - # Non-routable IP address that should always deliver a dial timeout. 
- # See: https://stackoverflow.com/questions/100841/artificially-create-a-connection-timeout-error#answer-904609 - url = "http://50.255.255.1" - weight = 1 - [backends.backend2] - [backends.backend2.servers.server2] - url = "http://{{.TimeoutEndpoint}}:9000" - weight = 1 +[Routers] + [Routers.router1] + Service = "service1" + Rule = "Path:/dialTimeout" -[frontends] - [frontends.frontend1] - backend = "backend1" - [frontends.frontend1.routes.test_1] - rule = "Path:/dialTimeout" - [frontends.frontend2] - backend = "backend2" - [frontends.frontend2.routes.test_2] - rule = "Path:/responseHeaderTimeout" + [Routers.router2] + Service = "service2" + Rule = "Path:/responseHeaderTimeout" + +[Services] + [Services.service1] + [Services.service1.LoadBalancer] + [[Services.service1.LoadBalancer.Servers]] + URL = "http://50.255.255.1" + Weight = 1 + + [Services.service2] + [Services.service2.LoadBalancer] + [[Services.service2.LoadBalancer.Servers]] + URL = "http://{{.TimeoutEndpoint}}:9000" + Weight = 1 diff --git a/integration/fixtures/tracing/simple.toml b/integration/fixtures/tracing/simple.toml index 74b802298..ebf5a1f96 100644 --- a/integration/fixtures/tracing/simple.toml +++ b/integration/fixtures/tracing/simple.toml @@ -19,53 +19,57 @@ debug = true samplingType = "const" samplingParam = 1.0 -[retry] - attempts = 3 - [file] -[backends] - [backends.backend1] - [backends.backend1.servers.server-ratelimit] - url = "http://{{.WhoAmiIP}}:{{.WhoAmiPort}}" - weight = 1 - [backends.backend2] - [backends.backend2.servers.server-retry] - url = "http://{{.WhoAmiIP}}:{{.WhoAmiPort}}" - weight = 1 - [backends.backend3] - [backends.backend3.servers.server-auth] - url = "http://{{.WhoAmiIP}}:{{.WhoAmiPort}}" - weight = 1 +[Routers] + [Routers.router1] + Service = "service1" + Middlewares = ["retry", "ratelimit"] + Rule = "Path:/ratelimit" + [Routers.router2] + Service = "service2" + Middlewares = ["retry"] + Rule = "Path:/retry" + [Routers.router3] + Service = "service3" + Middlewares = ["retry", "basic-auth"] + Rule = "Path:/auth" -[frontends] - [frontends.frontend1] +[Middlewares] + [Middlewares.retry.retry] + attempts = 3 + [Middlewares.basic-auth.BasicAuth] + users = ["test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/", "test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"] + [Middlewares.ratelimit.RateLimit] + extractorfunc = "client.ip" + [Middlewares.ratelimit.RateLimit.rateset.rateset1] + period = "60s" + average = 4 + burst = 5 + [Middlewares.ratelimit.RateLimit.rateset.rateset2] + period = "3s" + average = 1 + burst = 2 + + +[Services] + [Services.service1] + [Services.service1.LoadBalancer] + passHostHeader = true + [[Services.service1.LoadBalancer.Servers]] + URL = "http://{{.WhoAmiIP}}:{{.WhoAmiPort}}" + Weight = 1 + + [Services.service2] passHostHeader = true - backend = "backend1" - [frontends.frontend1.routes.test_ratelimit] - rule = "Path:/ratelimit" - [frontends.frontend1.ratelimit] - extractorfunc = "client.ip" - [frontends.frontend1.ratelimit.rateset.rateset1] - period = "60s" - average = 4 - burst = 5 - [frontends.frontend1.ratelimit.rateset.rateset2] - period = "3s" - average = 1 - burst = 2 - [frontends.frontend2] + [Services.service2.LoadBalancer] + [[Services.service2.LoadBalancer.Servers]] + URL = "http://{{.WhoAmiIP}}:{{.WhoAmiPort}}" + Weight = 1 + + [Services.service3] passHostHeader = true - backend = "backend2" - [frontends.frontend2.routes.test_retry] - rule = "Path:/retry" - [frontends.frontend3] - passHostHeader = true - backend = "backend3" - [frontends.frontend3.auth.basic] - users = [ - 
"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/", - "test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0", - ] - [frontends.frontend3.routes.test_auth] - rule = "Path:/auth" + [Services.service3.LoadBalancer] + [[Services.service3.LoadBalancer.Servers]] + URL = "http://{{.WhoAmiIP}}:{{.WhoAmiPort}}" + Weight = 1 diff --git a/integration/fixtures/traefik_log_config.toml b/integration/fixtures/traefik_log_config.toml index 74a46ace0..7d7f90c0f 100644 --- a/integration/fixtures/traefik_log_config.toml +++ b/integration/fixtures/traefik_log_config.toml @@ -22,4 +22,4 @@ checkNewVersion = false [docker] exposedByDefault = false domain = "docker.local" - watch = true \ No newline at end of file + watch = true diff --git a/integration/fixtures/websocket/config.toml b/integration/fixtures/websocket/config.toml index 480ca01fc..bb643d609 100644 --- a/integration/fixtures/websocket/config.toml +++ b/integration/fixtures/websocket/config.toml @@ -10,15 +10,15 @@ logLevel = "DEBUG" [file] -[backends] - [backends.backend1] - [backends.backend1.servers.server1] - url = "{{ .WebsocketServer }}" - weight = 1 +[Routers] + [Routers.router1] + Service = "service1" + Rule = "PathPrefix:/ws" -[frontends] - [frontends.frontend1] - backend = "backend1" - passHostHeader = true - [frontends.frontend1.routes.test_1] - rule = "PathPrefix:/ws" +[Services] + [Services.service1] + [Services.service1.LoadBalancer] + passHostHeader = true + [[Services.service1.LoadBalancer.Servers]] + URL = "{{ .WebsocketServer }}" + Weight = 1 diff --git a/integration/fixtures/websocket/config_https.toml b/integration/fixtures/websocket/config_https.toml index 8f9c01c01..e02599a1d 100644 --- a/integration/fixtures/websocket/config_https.toml +++ b/integration/fixtures/websocket/config_https.toml @@ -15,15 +15,15 @@ insecureSkipVerify=true [file] -[backends] - [backends.backend1] - [backends.backend1.servers.server1] - url = "{{ .WebsocketServer }}" - weight = 1 +[Routers] + [Routers.router1] + Service = "service1" + Rule = "Path:/echo,/ws" -[frontends] - [frontends.frontend1] - backend = "backend1" - passHostHeader = true - [frontends.frontend1.routes.test_1] - rule = "Path:/echo,/ws" +[Services] + [Services.service1] + [Services.service1.LoadBalancer] + PassHostHeader = true + [[Services.service1.LoadBalancer.Servers]] + URL = "{{ .WebsocketServer }}" + Weight = 1 diff --git a/integration/grpc_test.go b/integration/grpc_test.go index a623621d1..597b829a9 100644 --- a/integration/grpc_test.go +++ b/integration/grpc_test.go @@ -167,7 +167,7 @@ func (s *GRPCSuite) TestGRPC(c *check.C) { defer cmd.Process.Kill() // wait for Traefik - err = try.GetRequest("http://127.0.0.1:8080/api/providers", 1*time.Second, try.BodyContains("Host:127.0.0.1")) + err = try.GetRequest("http://127.0.0.1:8080/api/providers/file/routers", 1*time.Second, try.BodyContains("Host:127.0.0.1")) c.Assert(err, check.IsNil) var response string @@ -205,7 +205,7 @@ func (s *GRPCSuite) TestGRPCh2c(c *check.C) { defer cmd.Process.Kill() // wait for Traefik - err = try.GetRequest("http://127.0.0.1:8080/api/providers", 1*time.Second, try.BodyContains("Host:127.0.0.1")) + err = try.GetRequest("http://127.0.0.1:8080/api/providers/file/routers", 1*time.Second, try.BodyContains("Host:127.0.0.1")) c.Assert(err, check.IsNil) var response string @@ -247,7 +247,7 @@ func (s *GRPCSuite) TestGRPCh2cTermination(c *check.C) { defer cmd.Process.Kill() // wait for Traefik - err = try.GetRequest("http://127.0.0.1:8080/api/providers", 1*time.Second, try.BodyContains("Host:127.0.0.1")) + err = 
try.GetRequest("http://127.0.0.1:8080/api/providers/file/routers", 1*time.Second, try.BodyContains("Host:127.0.0.1")) c.Assert(err, check.IsNil) var response string @@ -289,7 +289,7 @@ func (s *GRPCSuite) TestGRPCInsecure(c *check.C) { defer cmd.Process.Kill() // wait for Traefik - err = try.GetRequest("http://127.0.0.1:8080/api/providers", 1*time.Second, try.BodyContains("Host:127.0.0.1")) + err = try.GetRequest("http://127.0.0.1:8080/api/providers/file/routers", 1*time.Second, try.BodyContains("Host:127.0.0.1")) c.Assert(err, check.IsNil) var response string @@ -336,7 +336,7 @@ func (s *GRPCSuite) TestGRPCBuffer(c *check.C) { defer cmd.Process.Kill() // wait for Traefik - err = try.GetRequest("http://127.0.0.1:8080/api/providers", 1*time.Second, try.BodyContains("Host:127.0.0.1")) + err = try.GetRequest("http://127.0.0.1:8080/api/providers/file/routers", 1*time.Second, try.BodyContains("Host:127.0.0.1")) c.Assert(err, check.IsNil) var client helloworld.Greeter_StreamExampleClient client, closer, err := callStreamExampleClientGRPC() @@ -364,7 +364,7 @@ func (s *GRPCSuite) TestGRPCBuffer(c *check.C) { func (s *GRPCSuite) TestGRPCBufferWithFlushInterval(c *check.C) { stopStreamExample := make(chan bool) - defer func() { stopStreamExample <- true }() + lis, err := net.Listen("tcp", ":0") c.Assert(err, check.IsNil) _, port, err := net.SplitHostPort(lis.Addr().String()) @@ -387,21 +387,22 @@ func (s *GRPCSuite) TestGRPCBufferWithFlushInterval(c *check.C) { KeyContent: string(LocalhostKey), GRPCServerPort: port, }) - defer os.Remove(file) + cmd, display := s.traefikCmd(withConfigFile(file)) defer display(c) - err = cmd.Start() c.Assert(err, check.IsNil) defer cmd.Process.Kill() // wait for Traefik - err = try.GetRequest("http://127.0.0.1:8080/api/providers", 1*time.Second, try.BodyContains("Host:127.0.0.1")) + err = try.GetRequest("http://127.0.0.1:8080/api/providers/file/routers", 1*time.Second, try.BodyContains("Host:127.0.0.1")) c.Assert(err, check.IsNil) + var client helloworld.Greeter_StreamExampleClient client, closer, err := callStreamExampleClientGRPC() defer closer() + defer func() { stopStreamExample <- true }() c.Assert(err, check.IsNil) received := make(chan bool) @@ -412,7 +413,7 @@ func (s *GRPCSuite) TestGRPCBufferWithFlushInterval(c *check.C) { received <- true }() - err = try.Do(time.Millisecond*100, func() error { + err = try.Do(100*time.Millisecond, func() error { select { case <-received: return nil diff --git a/integration/healthcheck_test.go b/integration/healthcheck_test.go index 455b20f59..75f2f1b4a 100644 --- a/integration/healthcheck_test.go +++ b/integration/healthcheck_test.go @@ -41,7 +41,7 @@ func (s *HealthCheckSuite) TestSimpleConfiguration(c *check.C) { defer cmd.Process.Kill() // wait for traefik - err = try.GetRequest("http://127.0.0.1:8080/api/providers", 60*time.Second, try.BodyContains("Host:test.localhost")) + err = try.GetRequest("http://127.0.0.1:8080/api/providers/file/routers", 60*time.Second, try.BodyContains("Host:test.localhost")) c.Assert(err, checker.IsNil) frontendHealthReq, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/health", nil) @@ -117,7 +117,7 @@ func (s *HealthCheckSuite) doTestMultipleEntrypoints(c *check.C, fixture string) defer cmd.Process.Kill() // Wait for traefik - err = try.GetRequest("http://localhost:8080/api/providers", 60*time.Second, try.BodyContains("Host:test.localhost")) + err = try.GetRequest("http://localhost:8080/api/providers/file/routers", 60*time.Second, try.BodyContains("Host:test.localhost")) 
c.Assert(err, checker.IsNil) // Check entrypoint http1 @@ -147,7 +147,7 @@ func (s *HealthCheckSuite) doTestMultipleEntrypoints(c *check.C, fixture string) } // Verify no backend service is available due to failing health checks - err = try.Request(frontendHealthReq, 3*time.Second, try.StatusCodeIs(http.StatusServiceUnavailable)) + err = try.Request(frontendHealthReq, 5*time.Second, try.StatusCodeIs(http.StatusServiceUnavailable)) c.Assert(err, checker.IsNil) // reactivate the whoami2 @@ -194,7 +194,7 @@ func (s *HealthCheckSuite) TestPortOverload(c *check.C) { defer cmd.Process.Kill() // wait for traefik - err = try.GetRequest("http://127.0.0.1:8080/api/providers", 10*time.Second, try.BodyContains("Host:test.localhost")) + err = try.GetRequest("http://127.0.0.1:8080/api/providers/file/routers", 10*time.Second, try.BodyContains("Host:test.localhost")) c.Assert(err, checker.IsNil) frontendHealthReq, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/health", nil) diff --git a/integration/https_test.go b/integration/https_test.go index 1eade1045..1d940b1ab 100644 --- a/integration/https_test.go +++ b/integration/https_test.go @@ -12,8 +12,8 @@ import ( "github.com/BurntSushi/toml" "github.com/containous/traefik/integration/try" + "github.com/containous/traefik/old/types" traefiktls "github.com/containous/traefik/tls" - "github.com/containous/traefik/types" "github.com/go-check/check" checker "github.com/vdemeester/shakers" ) @@ -32,7 +32,7 @@ func (s *HTTPSSuite) TestWithSNIConfigHandshake(c *check.C) { defer cmd.Process.Kill() // wait for Traefik - err = try.GetRequest("http://127.0.0.1:8080/api/providers", 500*time.Millisecond, try.BodyContains("Host:snitest.org")) + err = try.GetRequest("http://127.0.0.1:8080/api/providers/file/routers", 500*time.Millisecond, try.BodyContains("Host:snitest.org")) c.Assert(err, checker.IsNil) tlsConfig := &tls.Config{ @@ -66,7 +66,7 @@ func (s *HTTPSSuite) TestWithSNIConfigRoute(c *check.C) { defer cmd.Process.Kill() // wait for Traefik - err = try.GetRequest("http://127.0.0.1:8080/api/providers", 1*time.Second, try.BodyContains("Host:snitest.org")) + err = try.GetRequest("http://127.0.0.1:8080/api/providers/file/routers", 1*time.Second, try.BodyContains("Host:snitest.org")) c.Assert(err, checker.IsNil) backend1 := startTestServer("9010", http.StatusNoContent) @@ -122,7 +122,7 @@ func (s *HTTPSSuite) TestWithSNIStrictNotMatchedRequest(c *check.C) { defer cmd.Process.Kill() // wait for Traefik - err = try.GetRequest("http://127.0.0.1:8080/api/providers", 500*time.Millisecond, try.BodyContains("Host:snitest.com")) + err = try.GetRequest("http://127.0.0.1:8080/api/providers/file/routers", 500*time.Millisecond, try.BodyContains("Host:snitest.com")) c.Assert(err, checker.IsNil) tlsConfig := &tls.Config{ @@ -146,7 +146,7 @@ func (s *HTTPSSuite) TestWithDefaultCertificate(c *check.C) { defer cmd.Process.Kill() // wait for Traefik - err = try.GetRequest("http://127.0.0.1:8080/api/providers", 500*time.Millisecond, try.BodyContains("Host:snitest.com")) + err = try.GetRequest("http://127.0.0.1:8080/api/providers/file/routers", 500*time.Millisecond, try.BodyContains("Host:snitest.com")) c.Assert(err, checker.IsNil) tlsConfig := &tls.Config{ @@ -180,7 +180,7 @@ func (s *HTTPSSuite) TestWithDefaultCertificateNoSNI(c *check.C) { defer cmd.Process.Kill() // wait for Traefik - err = try.GetRequest("http://127.0.0.1:8080/api/providers", 500*time.Millisecond, try.BodyContains("Host:snitest.com")) + err = 
try.GetRequest("http://127.0.0.1:8080/api/providers/file/routers", 500*time.Millisecond, try.BodyContains("Host:snitest.com")) c.Assert(err, checker.IsNil) tlsConfig := &tls.Config{ @@ -214,7 +214,7 @@ func (s *HTTPSSuite) TestWithOverlappingStaticCertificate(c *check.C) { defer cmd.Process.Kill() // wait for Traefik - err = try.GetRequest("http://127.0.0.1:8080/api/providers", 500*time.Millisecond, try.BodyContains("Host:snitest.com")) + err = try.GetRequest("http://127.0.0.1:8080/api/providers/file/routers", 500*time.Millisecond, try.BodyContains("Host:snitest.com")) c.Assert(err, checker.IsNil) tlsConfig := &tls.Config{ @@ -249,7 +249,7 @@ func (s *HTTPSSuite) TestWithOverlappingDynamicCertificate(c *check.C) { defer cmd.Process.Kill() // wait for Traefik - err = try.GetRequest("http://127.0.0.1:8080/api/providers", 500*time.Millisecond, try.BodyContains("Host:snitest.com")) + err = try.GetRequest("http://127.0.0.1:8080/api/providers/file/routers", 500*time.Millisecond, try.BodyContains("Host:snitest.com")) c.Assert(err, checker.IsNil) tlsConfig := &tls.Config{ @@ -282,7 +282,7 @@ func (s *HTTPSSuite) TestWithClientCertificateAuthentication(c *check.C) { defer cmd.Process.Kill() // wait for Traefik - err = try.GetRequest("http://127.0.0.1:8080/api/providers", 500*time.Millisecond, try.BodyContains("Host:snitest.org")) + err = try.GetRequest("http://127.0.0.1:8080/api/providers/file/routers", 500*time.Millisecond, try.BodyContains("Host:snitest.org")) c.Assert(err, checker.IsNil) tlsConfig := &tls.Config{ @@ -338,7 +338,7 @@ func (s *HTTPSSuite) TestWithClientCertificateAuthenticationMultipeCAs(c *check. defer cmd.Process.Kill() // wait for Traefik - err = try.GetRequest("http://127.0.0.1:8080/api/providers", 500*time.Millisecond, try.BodyContains("Host:snitest.org")) + err = try.GetRequest("http://127.0.0.1:8080/api/providers/file/routers", 1*time.Second, try.BodyContains("Host:snitest.org")) c.Assert(err, checker.IsNil) tlsConfig := &tls.Config{ @@ -399,7 +399,7 @@ func (s *HTTPSSuite) TestWithClientCertificateAuthenticationMultipeCAsMultipleFi defer cmd.Process.Kill() // wait for Traefik - err = try.GetRequest("http://127.0.0.1:8080/api/providers", 1000*time.Millisecond, try.BodyContains("Host:snitest.org")) + err = try.GetRequest("http://127.0.0.1:8080/api/providers/file/routers", 1*time.Second, try.BodyContains("Host:snitest.org")) c.Assert(err, checker.IsNil) tlsConfig := &tls.Config{ @@ -464,7 +464,7 @@ func (s *HTTPSSuite) TestWithRootCAsContentForHTTPSOnBackend(c *check.C) { defer cmd.Process.Kill() // wait for Traefik - err = try.GetRequest("http://127.0.0.1:8080/api/providers", 1*time.Second, try.BodyContains(backend.URL)) + err = try.GetRequest("http://127.0.0.1:8080/api/providers/file/services", 1*time.Second, try.BodyContains(backend.URL)) c.Assert(err, checker.IsNil) err = try.GetRequest("http://127.0.0.1:8081/ping", 1*time.Second, try.StatusCodeIs(http.StatusOK)) @@ -486,7 +486,7 @@ func (s *HTTPSSuite) TestWithRootCAsFileForHTTPSOnBackend(c *check.C) { defer cmd.Process.Kill() // wait for Traefik - err = try.GetRequest("http://127.0.0.1:8080/api/providers", 1*time.Second, try.BodyContains(backend.URL)) + err = try.GetRequest("http://127.0.0.1:8080/api/providers/file/services", 1*time.Second, try.BodyContains(backend.URL)) c.Assert(err, checker.IsNil) err = try.GetRequest("http://127.0.0.1:8081/ping", 1*time.Second, try.StatusCodeIs(http.StatusOK)) @@ -544,7 +544,7 @@ func (s *HTTPSSuite) TestWithSNIDynamicConfigRouteWithNoChange(c *check.C) { } // wait for Traefik - err 
= try.GetRequest("http://127.0.0.1:8080/api/providers", 1*time.Second, try.BodyContains("Host:"+tr1.TLSClientConfig.ServerName)) + err = try.GetRequest("http://127.0.0.1:8080/api/providers/file/routers", 1*time.Second, try.BodyContains("Host:"+tr1.TLSClientConfig.ServerName)) c.Assert(err, checker.IsNil) backend1 := startTestServer("9010", http.StatusNoContent) @@ -613,7 +613,7 @@ func (s *HTTPSSuite) TestWithSNIDynamicConfigRouteWithChange(c *check.C) { } // wait for Traefik - err = try.GetRequest("http://127.0.0.1:8080/api/providers", 1*time.Second, try.BodyContains("Host:"+tr2.TLSClientConfig.ServerName)) + err = try.GetRequest("http://127.0.0.1:8080/api/providers/file/routers", 1*time.Second, try.BodyContains("Host:"+tr2.TLSClientConfig.ServerName)) c.Assert(err, checker.IsNil) backend1 := startTestServer("9010", http.StatusNoContent) @@ -676,7 +676,7 @@ func (s *HTTPSSuite) TestWithSNIDynamicConfigRouteWithTlsConfigurationDeletion(c } // wait for Traefik - err = try.GetRequest("http://127.0.0.1:8080/api/providers", 1*time.Second, try.BodyContains("Host:"+tr2.TLSClientConfig.ServerName)) + err = try.GetRequest("http://127.0.0.1:8080/api/providers/file/routers", 1*time.Second, try.BodyContains("Host:"+tr2.TLSClientConfig.ServerName)) c.Assert(err, checker.IsNil) backend2 := startTestServer("9020", http.StatusResetContent) @@ -741,7 +741,7 @@ func (s *HTTPSSuite) TestEntrypointHttpsRedirectAndPathModification(c *check.C) defer cmd.Process.Kill() // wait for Traefik - err = try.GetRequest("http://127.0.0.1:8080/api/providers", 1000*time.Millisecond, try.BodyContains("Host: example.com")) + err = try.GetRequest("http://127.0.0.1:8080/api/providers/file/routers", 5*time.Second, try.BodyContains("Host: example.com")) c.Assert(err, checker.IsNil) client := &http.Client{ diff --git a/integration/integration_test.go b/integration/integration_test.go index 880301d73..e6f7aee2f 100644 --- a/integration/integration_test.go +++ b/integration/integration_test.go @@ -37,29 +37,35 @@ func init() { if *container { // tests launched from a container - check.Suite(&AccessLogSuite{}) + + // FIXME Provider tests + // check.Suite(&ConsulCatalogSuite{}) + // check.Suite(&ConsulSuite{}) + // check.Suite(&DockerComposeSuite{}) + // check.Suite(&DockerSuite{}) + // check.Suite(&DynamoDBSuite{}) + // check.Suite(&EurekaSuite{}) + // check.Suite(&MarathonSuite{}) + // check.Suite(&MarathonSuite15{}) + // check.Suite(&MesosSuite{}) + + // FIXME use docker + // check.Suite(&AccessLogSuite{}) + // check.Suite(&ConstraintSuite{}) + // check.Suite(&TLSClientHeadersSuite{}) + // check.Suite(&HostResolverSuite{}) + // check.Suite(&LogRotationSuite{}) + + // FIXME e2e tests check.Suite(&AcmeSuite{}) - check.Suite(&ConstraintSuite{}) - check.Suite(&ConsulCatalogSuite{}) - check.Suite(&ConsulSuite{}) - check.Suite(&DockerComposeSuite{}) - check.Suite(&DockerSuite{}) - check.Suite(&DynamoDBSuite{}) check.Suite(&ErrorPagesSuite{}) - check.Suite(&EurekaSuite{}) check.Suite(&FileSuite{}) check.Suite(&GRPCSuite{}) check.Suite(&HealthCheckSuite{}) - check.Suite(&HostResolverSuite{}) check.Suite(&HTTPSSuite{}) - check.Suite(&LogRotationSuite{}) - check.Suite(&MarathonSuite{}) - check.Suite(&MarathonSuite15{}) - check.Suite(&MesosSuite{}) check.Suite(&RateLimitSuite{}) check.Suite(&RetrySuite{}) check.Suite(&SimpleSuite{}) - check.Suite(&TLSClientHeadersSuite{}) check.Suite(&TimeoutSuite{}) check.Suite(&TracingSuite{}) check.Suite(&WebsocketSuite{}) @@ -67,7 +73,9 @@ func init() { if *host { // tests launched from the host 
check.Suite(&ProxyProtocolSuite{}) - check.Suite(&Etcd3Suite{}) + + // FIXME Provider tests + // check.Suite(&Etcd3Suite{}) } } @@ -125,10 +133,10 @@ func (s *BaseSuite) traefikCmd(args ...string) (*exec.Cmd, func(*check.C)) { func (s *BaseSuite) displayTraefikLog(c *check.C, output *bytes.Buffer) { if output == nil || output.Len() == 0 { - log.Printf("%s: No Traefik logs.", c.TestName()) + log.Infof("%s: No Traefik logs.", c.TestName()) } else { - log.Printf("%s: Traefik logs: ", c.TestName()) - log.Println(output.String()) + log.Infof("%s: Traefik logs: ", c.TestName()) + log.Infof(output.String()) } } diff --git a/integration/marathon15_test.go b/integration/marathon15_test.go index a2a201df0..4d2c22026 100644 --- a/integration/marathon15_test.go +++ b/integration/marathon15_test.go @@ -7,7 +7,7 @@ import ( "time" "github.com/containous/traefik/integration/try" - "github.com/containous/traefik/provider/label" + "github.com/containous/traefik/old/provider/label" "github.com/gambol99/go-marathon" "github.com/go-check/check" checker "github.com/vdemeester/shakers" diff --git a/integration/marathon_test.go b/integration/marathon_test.go index 91e2d6ba1..9e7b44072 100644 --- a/integration/marathon_test.go +++ b/integration/marathon_test.go @@ -7,7 +7,7 @@ import ( "time" "github.com/containous/traefik/integration/try" - "github.com/containous/traefik/provider/label" + "github.com/containous/traefik/old/provider/label" "github.com/gambol99/go-marathon" "github.com/go-check/check" checker "github.com/vdemeester/shakers" diff --git a/integration/retry_test.go b/integration/retry_test.go index ca935779e..e5d73e5d8 100644 --- a/integration/retry_test.go +++ b/integration/retry_test.go @@ -31,7 +31,7 @@ func (s *RetrySuite) TestRetry(c *check.C) { c.Assert(err, checker.IsNil) defer cmd.Process.Kill() - err = try.GetRequest("http://127.0.0.1:8080/api/providers", 60*time.Second, try.BodyContains("PathPrefix:/")) + err = try.GetRequest("http://127.0.0.1:8080/api/providers/file/routers", 60*time.Second, try.BodyContains("PathPrefix:/")) c.Assert(err, checker.IsNil) // This simulates a DialTimeout when connecting to the backend server. @@ -53,7 +53,7 @@ func (s *RetrySuite) TestRetryWebsocket(c *check.C) { c.Assert(err, checker.IsNil) defer cmd.Process.Kill() - err = try.GetRequest("http://127.0.0.1:8080/api/providers", 60*time.Second, try.BodyContains("PathPrefix:/")) + err = try.GetRequest("http://127.0.0.1:8080/api/providers/file/routers", 60*time.Second, try.BodyContains("PathPrefix:/")) c.Assert(err, checker.IsNil) // This simulates a DialTimeout when connecting to the backend server. 
diff --git a/integration/basic_test.go b/integration/simple_test.go similarity index 89% rename from integration/basic_test.go rename to integration/simple_test.go index f691ee79c..4ceaf814b 100644 --- a/integration/basic_test.go +++ b/integration/simple_test.go @@ -57,7 +57,7 @@ func (s *SimpleSuite) TestWithWebConfig(c *check.C) { c.Assert(err, checker.IsNil) defer cmd.Process.Kill() - err = try.GetRequest("http://127.0.0.1:8080/api", 1*time.Second, try.StatusCodeIs(http.StatusOK)) + err = try.GetRequest("http://127.0.0.1:8080/api/providers", 1*time.Second, try.StatusCodeIs(http.StatusOK)) c.Assert(err, checker.IsNil) } @@ -175,6 +175,8 @@ func (s *SimpleSuite) TestRequestAcceptGraceTimeout(c *check.C) { } func (s *SimpleSuite) TestApiOnSameEntryPoint(c *check.C) { + c.Skip("Use docker") + s.createComposeProject(c, "base") s.composeProject.Start(c) @@ -193,7 +195,7 @@ func (s *SimpleSuite) TestApiOnSameEntryPoint(c *check.C) { err = try.GetRequest("http://127.0.0.1:8000/api", 1*time.Second, try.StatusCodeIs(http.StatusOK)) c.Assert(err, checker.IsNil) - err = try.GetRequest("http://127.0.0.1:8000/api/providers", 1*time.Second, try.BodyContains("PathPrefix")) + err = try.GetRequest("http://127.0.0.1:8000/api/providers/file/routers", 1*time.Second, try.BodyContains("PathPrefix")) c.Assert(err, checker.IsNil) err = try.GetRequest("http://127.0.0.1:8000/whoami", 1*time.Second, try.StatusCodeIs(http.StatusOK)) @@ -201,6 +203,8 @@ func (s *SimpleSuite) TestApiOnSameEntryPoint(c *check.C) { } func (s *SimpleSuite) TestStatsWithMultipleEntryPoint(c *check.C) { + c.Skip("Use docker") + s.createComposeProject(c, "stats") s.composeProject.Start(c) @@ -221,7 +225,7 @@ func (s *SimpleSuite) TestStatsWithMultipleEntryPoint(c *check.C) { err = try.GetRequest("http://127.0.0.1:8080/api", 1*time.Second, try.StatusCodeIs(http.StatusOK)) c.Assert(err, checker.IsNil) - err = try.GetRequest("http://127.0.0.1:8080/api/providers", 1*time.Second, try.BodyContains("PathPrefix")) + err = try.GetRequest("http://127.0.0.1:8080/api/providers/file/routers", 1*time.Second, try.BodyContains("PathPrefix")) c.Assert(err, checker.IsNil) err = try.GetRequest("http://127.0.0.1:8000/whoami", 1*time.Second, try.StatusCodeIs(http.StatusOK)) @@ -236,6 +240,7 @@ func (s *SimpleSuite) TestStatsWithMultipleEntryPoint(c *check.C) { } func (s *SimpleSuite) TestNoAuthOnPing(c *check.C) { + c.Skip("Middlewares on entryPoint don't work anymore") s.createComposeProject(c, "base") s.composeProject.Start(c) @@ -254,6 +259,8 @@ func (s *SimpleSuite) TestNoAuthOnPing(c *check.C) { } func (s *SimpleSuite) TestDefaultEntrypointHTTP(c *check.C) { + c.Skip("Use docker") + s.createComposeProject(c, "base") s.composeProject.Start(c) @@ -264,7 +271,7 @@ func (s *SimpleSuite) TestDefaultEntrypointHTTP(c *check.C) { c.Assert(err, checker.IsNil) defer cmd.Process.Kill() - err = try.GetRequest("http://127.0.0.1:8080/api/providers", 1*time.Second, try.BodyContains("PathPrefix")) + err = try.GetRequest("http://127.0.0.1:8080/api/providers/file/routers", 1*time.Second, try.BodyContains("PathPrefix")) c.Assert(err, checker.IsNil) err = try.GetRequest("http://127.0.0.1:8000/whoami", 1*time.Second, try.StatusCodeIs(http.StatusOK)) @@ -272,6 +279,8 @@ func (s *SimpleSuite) TestDefaultEntrypointHTTP(c *check.C) { } func (s *SimpleSuite) TestWithUnexistingEntrypoint(c *check.C) { + c.Skip("Use docker") + s.createComposeProject(c, "base") s.composeProject.Start(c) @@ -282,7 +291,7 @@ func (s *SimpleSuite) TestWithUnexistingEntrypoint(c *check.C) { c.Assert(err, 
checker.IsNil) defer cmd.Process.Kill() - err = try.GetRequest("http://127.0.0.1:8080/api/providers", 1*time.Second, try.BodyContains("PathPrefix")) + err = try.GetRequest("http://127.0.0.1:8080/api/providers/file/routers", 1*time.Second, try.BodyContains("PathPrefix")) c.Assert(err, checker.IsNil) err = try.GetRequest("http://127.0.0.1:8000/whoami", 1*time.Second, try.StatusCodeIs(http.StatusOK)) @@ -290,6 +299,8 @@ func (s *SimpleSuite) TestWithUnexistingEntrypoint(c *check.C) { } func (s *SimpleSuite) TestMetricsPrometheusDefaultEntrypoint(c *check.C) { + c.Skip("Use docker") + s.createComposeProject(c, "base") s.composeProject.Start(c) @@ -300,7 +311,7 @@ func (s *SimpleSuite) TestMetricsPrometheusDefaultEntrypoint(c *check.C) { c.Assert(err, checker.IsNil) defer cmd.Process.Kill() - err = try.GetRequest("http://127.0.0.1:8080/api/providers", 1*time.Second, try.BodyContains("PathPrefix")) + err = try.GetRequest("http://127.0.0.1:8080/api/providers/file/routers", 1*time.Second, try.BodyContains("PathPrefix")) c.Assert(err, checker.IsNil) err = try.GetRequest("http://127.0.0.1:8000/whoami", 1*time.Second, try.StatusCodeIs(http.StatusOK)) @@ -311,6 +322,8 @@ func (s *SimpleSuite) TestMetricsPrometheusDefaultEntrypoint(c *check.C) { } func (s *SimpleSuite) TestMultipleProviderSameBackendName(c *check.C) { + c.Skip("Use docker") + s.createComposeProject(c, "base") s.composeProject.Start(c) @@ -328,7 +341,7 @@ func (s *SimpleSuite) TestMultipleProviderSameBackendName(c *check.C) { c.Assert(err, checker.IsNil) defer cmd.Process.Kill() - err = try.GetRequest("http://127.0.0.1:8080/api/providers", 1*time.Second, try.BodyContains("PathPrefix")) + err = try.GetRequest("http://127.0.0.1:8080/api/providers/file/routers", 1*time.Second, try.BodyContains("PathPrefix")) c.Assert(err, checker.IsNil) err = try.GetRequest("http://127.0.0.1:8000/whoami", 1*time.Second, try.BodyContains(ipWhoami01)) @@ -340,6 +353,8 @@ func (s *SimpleSuite) TestMultipleProviderSameBackendName(c *check.C) { } func (s *SimpleSuite) TestIPStrategyWhitelist(c *check.C) { + c.Skip("Use docker") + s.createComposeProject(c, "whitelist") s.composeProject.Start(c) @@ -350,7 +365,7 @@ func (s *SimpleSuite) TestIPStrategyWhitelist(c *check.C) { c.Assert(err, checker.IsNil) defer cmd.Process.Kill() - err = try.GetRequest("http://127.0.0.1:8080/api/providers", 1*time.Second, try.BodyContains("override")) + err = try.GetRequest("http://127.0.0.1:8080/api/providers/file/routers", 1*time.Second, try.BodyContains("override")) c.Assert(err, checker.IsNil) testCases := []struct { @@ -416,30 +431,6 @@ func (s *SimpleSuite) TestIPStrategyWhitelist(c *check.C) { } } -func (s *SimpleSuite) TestDontKeepTrailingSlash(c *check.C) { - file := s.adaptFile(c, "fixtures/keep_trailing_slash.toml", struct { - KeepTrailingSlash bool - }{false}) - defer os.Remove(file) - - cmd, output := s.traefikCmd(withConfigFile(file)) - defer output(c) - - err := cmd.Start() - c.Assert(err, checker.IsNil) - defer cmd.Process.Kill() - - oldCheckRedirect := http.DefaultClient.CheckRedirect - http.DefaultClient.CheckRedirect = func(req *http.Request, via []*http.Request) error { - return http.ErrUseLastResponse - } - - err = try.GetRequest("http://127.0.0.1:8000/test/foo/", 1*time.Second, try.StatusCodeIs(http.StatusMovedPermanently)) - c.Assert(err, checker.IsNil) - - http.DefaultClient.CheckRedirect = oldCheckRedirect -} - func (s *SimpleSuite) TestKeepTrailingSlash(c *check.C) { file := s.adaptFile(c, "fixtures/keep_trailing_slash.toml", struct { KeepTrailingSlash bool 
diff --git a/integration/timeout_test.go b/integration/timeout_test.go index 78482d122..6ab433b89 100644 --- a/integration/timeout_test.go +++ b/integration/timeout_test.go @@ -30,7 +30,7 @@ func (s *TimeoutSuite) TestForwardingTimeouts(c *check.C) { c.Assert(err, checker.IsNil) defer cmd.Process.Kill() - err = try.GetRequest("http://127.0.0.1:8080/api/providers", 60*time.Second, try.BodyContains("Path:/dialTimeout")) + err = try.GetRequest("http://127.0.0.1:8080/api/providers/file/routers", 60*time.Second, try.BodyContains("Path:/dialTimeout")) c.Assert(err, checker.IsNil) // This simulates a DialTimeout when connecting to the backend server. diff --git a/integration/tracing_test.go b/integration/tracing_test.go index eeacd6db4..22c37bb62 100644 --- a/integration/tracing_test.go +++ b/integration/tracing_test.go @@ -85,7 +85,7 @@ func (s *TracingSuite) TestZipkinRateLimit(c *check.C) { err = try.GetRequest("http://127.0.0.1:8000/ratelimit", 500*time.Millisecond, try.StatusCodeIs(http.StatusTooManyRequests)) c.Assert(err, checker.IsNil) - err = try.GetRequest("http://"+s.ZipkinIP+":9411/api/v2/spans?serviceName=tracing", 20*time.Second, try.BodyContains("forward frontend1/backend1", "rate limit")) + err = try.GetRequest("http://"+s.ZipkinIP+":9411/api/v2/spans?serviceName=tracing", 20*time.Second, try.BodyContains("forward service1/router1", "ratelimit")) c.Assert(err, checker.IsNil) } @@ -109,7 +109,7 @@ func (s *TracingSuite) TestZipkinRetry(c *check.C) { err = try.GetRequest("http://127.0.0.1:8000/retry", 500*time.Millisecond, try.StatusCodeIs(http.StatusBadGateway)) c.Assert(err, checker.IsNil) - err = try.GetRequest("http://"+s.ZipkinIP+":9411/api/v2/spans?serviceName=tracing", 20*time.Second, try.BodyContains("forward frontend2/backend2", "retry")) + err = try.GetRequest("http://"+s.ZipkinIP+":9411/api/v2/spans?serviceName=tracing", 20*time.Second, try.BodyContains("forward service2/router2", "retry")) c.Assert(err, checker.IsNil) } @@ -132,6 +132,6 @@ func (s *TracingSuite) TestZipkinAuth(c *check.C) { err = try.GetRequest("http://127.0.0.1:8000/auth", 500*time.Millisecond, try.StatusCodeIs(http.StatusUnauthorized)) c.Assert(err, checker.IsNil) - err = try.GetRequest("http://"+s.ZipkinIP+":9411/api/v2/spans?serviceName=tracing", 20*time.Second, try.BodyContains("entrypoint http", "auth basic")) + err = try.GetRequest("http://"+s.ZipkinIP+":9411/api/v2/spans?serviceName=tracing", 20*time.Second, try.BodyContains("entrypoint http", "basic-auth")) c.Assert(err, checker.IsNil) } diff --git a/integration/websocket_test.go b/integration/websocket_test.go index f06583bfb..e423ae6d8 100644 --- a/integration/websocket_test.go +++ b/integration/websocket_test.go @@ -57,7 +57,7 @@ func (s *WebsocketSuite) TestBase(c *check.C) { defer cmd.Process.Kill() // wait for traefik - err = try.GetRequest("http://127.0.0.1:8080/api/providers", 10*time.Second, try.BodyContains("127.0.0.1")) + err = try.GetRequest("http://127.0.0.1:8080/api/providers/file/services", 10*time.Second, try.BodyContains("127.0.0.1")) c.Assert(err, checker.IsNil) conn, _, err := gorillawebsocket.DefaultDialer.Dial("ws://127.0.0.1:8000/ws", nil) @@ -107,7 +107,7 @@ func (s *WebsocketSuite) TestWrongOrigin(c *check.C) { defer cmd.Process.Kill() // wait for traefik - err = try.GetRequest("http://127.0.0.1:8080/api/providers", 10*time.Second, try.BodyContains("127.0.0.1")) + err = try.GetRequest("http://127.0.0.1:8080/api/providers/file/services", 10*time.Second, try.BodyContains("127.0.0.1")) c.Assert(err, checker.IsNil) config, 
err := websocket.NewConfig("ws://127.0.0.1:8000/ws", "ws://127.0.0.1:800") @@ -157,7 +157,7 @@ func (s *WebsocketSuite) TestOrigin(c *check.C) { defer cmd.Process.Kill() // wait for traefik - err = try.GetRequest("http://127.0.0.1:8080/api/providers", 10*time.Second, try.BodyContains("127.0.0.1")) + err = try.GetRequest("http://127.0.0.1:8080/api/providers/file/services", 10*time.Second, try.BodyContains("127.0.0.1")) c.Assert(err, checker.IsNil) config, err := websocket.NewConfig("ws://127.0.0.1:8000/ws", "ws://127.0.0.1:8000") @@ -218,7 +218,7 @@ func (s *WebsocketSuite) TestWrongOriginIgnoredByServer(c *check.C) { defer cmd.Process.Kill() // wait for traefik - err = try.GetRequest("http://127.0.0.1:8080/api/providers", 10*time.Second, try.BodyContains("127.0.0.1")) + err = try.GetRequest("http://127.0.0.1:8080/api/providers/file/services", 10*time.Second, try.BodyContains("127.0.0.1")) c.Assert(err, checker.IsNil) config, err := websocket.NewConfig("ws://127.0.0.1:8000/ws", "ws://127.0.0.1:80") @@ -276,7 +276,7 @@ func (s *WebsocketSuite) TestSSLTermination(c *check.C) { defer cmd.Process.Kill() // wait for traefik - err = try.GetRequest("http://127.0.0.1:8080/api/providers", 10*time.Second, try.BodyContains("127.0.0.1")) + err = try.GetRequest("http://127.0.0.1:8080/api/providers/file/services", 10*time.Second, try.BodyContains("127.0.0.1")) c.Assert(err, checker.IsNil) // Add client self-signed cert @@ -339,7 +339,7 @@ func (s *WebsocketSuite) TestBasicAuth(c *check.C) { defer cmd.Process.Kill() // wait for traefik - err = try.GetRequest("http://127.0.0.1:8080/api/providers", 10*time.Second, try.BodyContains("127.0.0.1")) + err = try.GetRequest("http://127.0.0.1:8080/api/providers/file/services", 10*time.Second, try.BodyContains("127.0.0.1")) c.Assert(err, checker.IsNil) config, err := websocket.NewConfig("ws://127.0.0.1:8000/ws", "ws://127.0.0.1:8000") @@ -383,7 +383,7 @@ func (s *WebsocketSuite) TestSpecificResponseFromBackend(c *check.C) { defer cmd.Process.Kill() // wait for traefik - err = try.GetRequest("http://127.0.0.1:8080/api/providers", 10*time.Second, try.BodyContains("127.0.0.1")) + err = try.GetRequest("http://127.0.0.1:8080/api/providers/file/services", 10*time.Second, try.BodyContains("127.0.0.1")) c.Assert(err, checker.IsNil) _, resp, err := gorillawebsocket.DefaultDialer.Dial("ws://127.0.0.1:8000/ws", nil) @@ -429,7 +429,7 @@ func (s *WebsocketSuite) TestURLWithURLEncodedChar(c *check.C) { defer cmd.Process.Kill() // wait for traefik - err = try.GetRequest("http://127.0.0.1:8080/api/providers", 10*time.Second, try.BodyContains("127.0.0.1")) + err = try.GetRequest("http://127.0.0.1:8080/api/providers/file/services", 10*time.Second, try.BodyContains("127.0.0.1")) c.Assert(err, checker.IsNil) conn, _, err := gorillawebsocket.DefaultDialer.Dial("ws://127.0.0.1:8000/ws/http%3A%2F%2Ftest", nil) @@ -484,7 +484,7 @@ func (s *WebsocketSuite) TestSSLhttp2(c *check.C) { defer cmd.Process.Kill() // wait for traefik - err = try.GetRequest("http://127.0.0.1:8080/api/providers", 10*time.Second, try.BodyContains("127.0.0.1")) + err = try.GetRequest("http://127.0.0.1:8080/api/providers/file/services", 10*time.Second, try.BodyContains("127.0.0.1")) c.Assert(err, checker.IsNil) // Add client self-signed cert @@ -543,7 +543,7 @@ func (s *WebsocketSuite) TestHeaderAreForwared(c *check.C) { defer cmd.Process.Kill() // wait for traefik - err = try.GetRequest("http://127.0.0.1:8080/api/providers", 10*time.Second, try.BodyContains("127.0.0.1")) + err = 
try.GetRequest("http://127.0.0.1:8080/api/providers/file/services", 10*time.Second, try.BodyContains("127.0.0.1")) c.Assert(err, checker.IsNil) headers := http.Header{} diff --git a/log/deprecated.go b/log/deprecated.go new file mode 100644 index 000000000..0bf50c743 --- /dev/null +++ b/log/deprecated.go @@ -0,0 +1,80 @@ +package log + +import "github.com/sirupsen/logrus" + +// Debug logs a message at level Debug on the standard logger. +// Deprecated +func Debug(args ...interface{}) { + mainLogger.Debug(args...) +} + +// Debugf logs a message at level Debug on the standard logger. +// Deprecated +func Debugf(format string, args ...interface{}) { + mainLogger.Debugf(format, args...) +} + +// Info logs a message at level Info on the standard logger. +// Deprecated +func Info(args ...interface{}) { + mainLogger.Info(args...) +} + +// Infof logs a message at level Info on the standard logger. +// Deprecated +func Infof(format string, args ...interface{}) { + mainLogger.Infof(format, args...) +} + +// Warn logs a message at level Warn on the standard logger. +// Deprecated +func Warn(args ...interface{}) { + mainLogger.Warn(args...) +} + +// Warnf logs a message at level Warn on the standard logger. +// Deprecated +func Warnf(format string, args ...interface{}) { + mainLogger.Warnf(format, args...) +} + +// Error logs a message at level Error on the standard logger. +// Deprecated +func Error(args ...interface{}) { + mainLogger.Error(args...) +} + +// Errorf logs a message at level Error on the standard logger. +// Deprecated +func Errorf(format string, args ...interface{}) { + mainLogger.Errorf(format, args...) +} + +// Panic logs a message at level Panic on the standard logger. +// Deprecated +func Panic(args ...interface{}) { + mainLogger.Panic(args...) +} + +// Panicf logs a message at level Panic on the standard logger. +// Deprecated +func Panicf(format string, args ...interface{}) { + mainLogger.Panicf(format, args...) +} + +// Fatal logs a message at level Fatal on the standard logger. +// Deprecated +func Fatal(args ...interface{}) { + mainLogger.Fatal(args...) +} + +// Fatalf logs a message at level Fatal on the standard logger. +// Deprecated +func Fatalf(format string, args ...interface{}) { + mainLogger.Fatalf(format, args...) +} + +// AddHook adds a hook to the standard logger hooks. +func AddHook(hook logrus.Hook) { + logrus.AddHook(hook) +} diff --git a/log/fields.go b/log/fields.go new file mode 100644 index 000000000..43a29f020 --- /dev/null +++ b/log/fields.go @@ -0,0 +1,14 @@ +package log + +// Log entry name +const ( + EntryPointName = "entryPointName" + RouterName = "routerName" + MiddlewareName = "middlewareName" + MiddlewareType = "middlewareType" + ProviderName = "providerName" + ServiceName = "serviceName" + MetricsProviderName = "metricsProviderName" + TracingProviderName = "tracingProviderName" + ServerName = "serverName" +) diff --git a/log/log.go b/log/log.go new file mode 100644 index 000000000..49f7736d6 --- /dev/null +++ b/log/log.go @@ -0,0 +1,145 @@ +package log + +import ( + "context" + "fmt" + "io" + "os" + + "github.com/sirupsen/logrus" +) + +type contextKey int + +const ( + loggerKey contextKey = iota +) + +// Logger the Traefik logger +type Logger interface { + logrus.FieldLogger + WriterLevel(logrus.Level) *io.PipeWriter +} + +var ( + mainLogger Logger + logFilePath string + logFile *os.File +) + +func init() { + mainLogger = logrus.StandardLogger() + logrus.SetOutput(os.Stdout) +} + +// SetLogger sets the logger. 
+func SetLogger(l Logger) { + mainLogger = l +} + +// SetOutput sets the standard logger output. +func SetOutput(out io.Writer) { + logrus.SetOutput(out) +} + +// SetFormatter sets the standard logger formatter. +func SetFormatter(formatter logrus.Formatter) { + logrus.SetFormatter(formatter) +} + +// SetLevel sets the standard logger level. +func SetLevel(level logrus.Level) { + logrus.SetLevel(level) +} + +// GetLevel returns the standard logger level. +func GetLevel() logrus.Level { + return logrus.GetLevel() +} + +// Str adds a string field +func Str(key, value string) func(logrus.Fields) { + return func(fields logrus.Fields) { + fields[key] = value + } +} + +// With Adds fields +func With(ctx context.Context, opts ...func(logrus.Fields)) context.Context { + logger := FromContext(ctx) + + fields := make(logrus.Fields) + for _, opt := range opts { + opt(fields) + } + logger = logger.WithFields(fields) + + return context.WithValue(ctx, loggerKey, logger) +} + +// FromContext Gets the logger from context +func FromContext(ctx context.Context) Logger { + if ctx == nil { + panic("nil context") + } + + logger, ok := ctx.Value(loggerKey).(Logger) + if !ok { + logger = mainLogger + } + + return logger +} + +// WithoutContext Gets the main logger +func WithoutContext() Logger { + return mainLogger +} + +// OpenFile opens the log file using the specified path +func OpenFile(path string) error { + logFilePath = path + + var err error + logFile, err = os.OpenFile(logFilePath, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666) + if err != nil { + return err + } + + SetOutput(logFile) + return nil +} + +// CloseFile closes the log and sets the Output to stdout +func CloseFile() error { + logrus.SetOutput(os.Stdout) + + if logFile != nil { + return logFile.Close() + } + return nil +} + +// RotateFile closes and reopens the log file to allow for rotation +// by an external source. If the log isn't backed by a file then +// it does nothing. 
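
A minimal usage sketch of the context-scoped fields defined above, in the same style the metrics packages further down use (the provider value is illustrative):

    ctx := log.With(context.Background(), log.Str(log.ProviderName, "docker"))
    log.FromContext(ctx).Debug("Configuration received")
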
+func RotateFile() error { + logger := FromContext(context.Background()) + + if logFile == nil && logFilePath == "" { + logger.Debug("Traefik log is not writing to a file, ignoring rotate request") + return nil + } + + if logFile != nil { + defer func(f *os.File) { + _ = f.Close() + }(logFile) + } + + if err := OpenFile(logFilePath); err != nil { + return fmt.Errorf("error opening log file: %s", err) + } + + return nil +} diff --git a/log/log_test.go b/log/log_test.go new file mode 100644 index 000000000..e4ce53221 --- /dev/null +++ b/log/log_test.go @@ -0,0 +1,58 @@ +package log + +import ( + "bytes" + "context" + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestLog(t *testing.T) { + testCases := []struct { + desc string + fields map[string]string + expected string + }{ + { + desc: "Log with one field", + fields: map[string]string{ + "foo": "bar", + }, + expected: ` level=error msg="message test" foo=bar$`, + }, + { + desc: "Log with two fields", + fields: map[string]string{ + "foo": "bar", + "oof": "rab", + }, + expected: ` level=error msg="message test" foo=bar oof=rab$`, + }, + { + desc: "Log without field", + fields: map[string]string{}, + expected: ` level=error msg="message test"$`, + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + + var buffer bytes.Buffer + SetOutput(&buffer) + + ctx := context.Background() + + for key, value := range test.fields { + ctx = With(ctx, Str(key, value)) + } + + FromContext(ctx).Error("message test") + + assert.Regexp(t, test.expected, strings.TrimSpace(buffer.String())) + }) + } +} diff --git a/metrics/datadog.go b/metrics/datadog.go index ccd4ffc62..ca468fb9a 100644 --- a/metrics/datadog.go +++ b/metrics/datadog.go @@ -1,6 +1,7 @@ package metrics import ( + "context" "time" "github.com/containous/traefik/log" @@ -11,7 +12,7 @@ import ( ) var datadogClient = dogstatsd.New("traefik.", kitlog.LoggerFunc(func(keyvals ...interface{}) error { - log.Info(keyvals) + log.WithoutContext().WithField(log.MetricsProviderName, "datadog").Info(keyvals) return nil })) @@ -34,9 +35,9 @@ const ( ) // RegisterDatadog registers the metrics pusher if this didn't happen yet and creates a datadog Registry instance. 
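
The metrics registries below now take a context so that log fields travel with the call; a sketch mirroring the updated datadog test (attaching the metrics provider field up front is an assumption about how callers wire it):

    ctx := log.With(context.Background(), log.Str(log.MetricsProviderName, "datadog"))
    registry := metrics.RegisterDatadog(ctx, &types.Datadog{Address: ":18125", PushInterval: "1s"})
    defer metrics.StopDatadog()
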
-func RegisterDatadog(config *types.Datadog) Registry { +func RegisterDatadog(ctx context.Context, config *types.Datadog) Registry { if datadogTicker == nil { - datadogTicker = initDatadogClient(config) + datadogTicker = initDatadogClient(ctx, config) } registry := &standardRegistry{ @@ -58,14 +59,14 @@ func RegisterDatadog(config *types.Datadog) Registry { return registry } -func initDatadogClient(config *types.Datadog) *time.Ticker { +func initDatadogClient(ctx context.Context, config *types.Datadog) *time.Ticker { address := config.Address if len(address) == 0 { address = "localhost:8125" } pushInterval, err := time.ParseDuration(config.PushInterval) if err != nil { - log.Warnf("Unable to parse %s into pushInterval, using 10s as default value", config.PushInterval) + log.FromContext(ctx).Warnf("Unable to parse %s from config.PushInterval: using 10s as the default value", config.PushInterval) pushInterval = 10 * time.Second } diff --git a/metrics/datadog_test.go b/metrics/datadog_test.go index 604a2ea16..568835d1e 100644 --- a/metrics/datadog_test.go +++ b/metrics/datadog_test.go @@ -1,6 +1,7 @@ package metrics import ( + "context" "net/http" "strconv" "testing" @@ -15,7 +16,7 @@ func TestDatadog(t *testing.T) { // This is needed to make sure that UDP Listener listens for data a bit longer, otherwise it will quit after a millisecond udp.Timeout = 5 * time.Second - datadogRegistry := RegisterDatadog(&types.Datadog{Address: ":18125", PushInterval: "1s"}) + datadogRegistry := RegisterDatadog(context.Background(), &types.Datadog{Address: ":18125", PushInterval: "1s"}) defer StopDatadog() if !datadogRegistry.IsEnabled() { diff --git a/metrics/influxdb.go b/metrics/influxdb.go index 5dec81374..af6df79a5 100644 --- a/metrics/influxdb.go +++ b/metrics/influxdb.go @@ -2,6 +2,7 @@ package metrics import ( "bytes" + "context" "fmt" "net/url" "regexp" @@ -39,13 +40,18 @@ const ( influxDBServerUpName = "traefik.backend.server.up" ) +const ( + protocolHTTP = "http" + protocolUDP = "udp" +) + // RegisterInfluxDB registers the metrics pusher if this didn't happen yet and creates a InfluxDB Registry instance. -func RegisterInfluxDB(config *types.InfluxDB) Registry { +func RegisterInfluxDB(ctx context.Context, config *types.InfluxDB) Registry { if influxDBClient == nil { - influxDBClient = initInfluxDBClient(config) + influxDBClient = initInfluxDBClient(ctx, config) } if influxDBTicker == nil { - influxDBTicker = initInfluxDBTicker(config) + influxDBTicker = initInfluxDBTicker(ctx, config) } return &standardRegistry{ @@ -66,30 +72,32 @@ func RegisterInfluxDB(config *types.InfluxDB) Registry { } // initInfluxDBTicker creates a influxDBClient -func initInfluxDBClient(config *types.InfluxDB) *influx.Influx { +func initInfluxDBClient(ctx context.Context, config *types.InfluxDB) *influx.Influx { + logger := log.FromContext(ctx) + // TODO deprecated: move this switch into configuration.SetEffectiveConfiguration when web provider will be removed. 
switch config.Protocol { - case "udp": + case protocolUDP: if len(config.Database) > 0 || len(config.RetentionPolicy) > 0 { - log.Warn("Database and RetentionPolicy are only used when protocol is http.") + logger.Warn("Database and RetentionPolicy options have no effect with UDP.") config.Database = "" config.RetentionPolicy = "" } - case "http": + case protocolHTTP: if u, err := url.Parse(config.Address); err == nil { if u.Scheme != "http" && u.Scheme != "https" { - log.Warnf("InfluxDB address %s should specify a scheme of http or https, defaulting to http.", config.Address) + logger.Warnf("InfluxDB address %s should specify a scheme (http or https): falling back on HTTP.", config.Address) config.Address = "http://" + config.Address } } else { - log.Errorf("Unable to parse influxdb address: %v, defaulting to udp.", err) - config.Protocol = "udp" + logger.Errorf("Unable to parse the InfluxDB address %v: falling back on UDP.", err) + config.Protocol = protocolUDP config.Database = "" config.RetentionPolicy = "" } default: - log.Warnf("Unsupported protocol: %s, defaulting to udp.", config.Protocol) - config.Protocol = "udp" + logger.Warnf("Unsupported protocol %s: falling back on UDP.", config.Protocol) + config.Protocol = protocolUDP config.Database = "" config.RetentionPolicy = "" } @@ -101,16 +109,16 @@ func initInfluxDBClient(config *types.InfluxDB) *influx.Influx { RetentionPolicy: config.RetentionPolicy, }, kitlog.LoggerFunc(func(keyvals ...interface{}) error { - log.Info(keyvals) + log.WithoutContext().WithField(log.MetricsProviderName, "influxdb").Info(keyvals) return nil })) } // initInfluxDBTicker initializes metrics pusher -func initInfluxDBTicker(config *types.InfluxDB) *time.Ticker { +func initInfluxDBTicker(ctx context.Context, config *types.InfluxDB) *time.Ticker { pushInterval, err := time.ParseDuration(config.PushInterval) if err != nil { - log.Warnf("Unable to parse %s into pushInterval, using 10s as default value", config.PushInterval) + log.FromContext(ctx).Warnf("Unable to parse %s from config.PushInterval: using 10s as the default value", config.PushInterval) pushInterval = 10 * time.Second } @@ -144,8 +152,10 @@ func (w *influxDBWriter) Write(bp influxdb.BatchPoints) error { defer c.Close() if writeErr := c.Write(bp); writeErr != nil { - log.Errorf("Error writing to influx: %s", writeErr.Error()) - if handleErr := w.handleWriteError(c, writeErr); handleErr != nil { + ctx := log.With(context.Background(), log.Str(log.MetricsProviderName, "influxdb")) + log.FromContext(ctx).Errorf("Error while writing to InfluxDB: %s", writeErr.Error()) + + if handleErr := w.handleWriteError(ctx, c, writeErr); handleErr != nil { return handleErr } // Retry write after successful handling of writeErr @@ -168,8 +178,8 @@ func (w *influxDBWriter) initWriteClient() (influxdb.Client, error) { }) } -func (w *influxDBWriter) handleWriteError(c influxdb.Client, writeErr error) error { - if w.config.Protocol != "http" { +func (w *influxDBWriter) handleWriteError(ctx context.Context, c influxdb.Client, writeErr error) error { + if w.config.Protocol != protocolHTTP { return writeErr } @@ -184,7 +194,9 @@ func (w *influxDBWriter) handleWriteError(c influxdb.Client, writeErr error) err qStr = fmt.Sprintf("%s WITH NAME \"%s\"", qStr, w.config.RetentionPolicy) } - log.Debugf("Influx database does not exist, attempting to create with query: %s", qStr) + logger := log.FromContext(ctx) + + logger.Debugf("InfluxDB database not found: attempting to create one with %s", qStr) q := influxdb.NewQuery(qStr, "", 
"") response, queryErr := c.Query(q) @@ -192,10 +204,10 @@ func (w *influxDBWriter) handleWriteError(c influxdb.Client, writeErr error) err queryErr = response.Error() } if queryErr != nil { - log.Errorf("Error creating InfluxDB database: %s", queryErr) + logger.Errorf("Error while creating the InfluxDB database %s", queryErr) return queryErr } - log.Debugf("Successfully created influx database: %s", w.config.Database) + logger.Debugf("Successfully created the InfluxDB database %s", w.config.Database) return nil } diff --git a/metrics/influxdb_test.go b/metrics/influxdb_test.go index 7604416cb..b045127f2 100644 --- a/metrics/influxdb_test.go +++ b/metrics/influxdb_test.go @@ -1,6 +1,7 @@ package metrics import ( + "context" "fmt" "io/ioutil" "net/http" @@ -19,7 +20,7 @@ func TestInfluxDB(t *testing.T) { // This is needed to make sure that UDP Listener listens for data a bit longer, otherwise it will quit after a millisecond udp.Timeout = 5 * time.Second - influxDBRegistry := RegisterInfluxDB(&types.InfluxDB{Address: ":8089", PushInterval: "1s"}) + influxDBRegistry := RegisterInfluxDB(context.Background(), &types.InfluxDB{Address: ":8089", PushInterval: "1s"}) defer StopInfluxDB() if !influxDBRegistry.IsEnabled() { @@ -79,7 +80,7 @@ func TestInfluxDBHTTP(t *testing.T) { })) defer ts.Close() - influxDBRegistry := RegisterInfluxDB(&types.InfluxDB{Address: ts.URL, Protocol: "http", PushInterval: "1s", Database: "test", RetentionPolicy: "autogen"}) + influxDBRegistry := RegisterInfluxDB(context.Background(), &types.InfluxDB{Address: ts.URL, Protocol: "http", PushInterval: "1s", Database: "test", RetentionPolicy: "autogen"}) defer StopInfluxDB() if !influxDBRegistry.IsEnabled() { diff --git a/metrics/prometheus.go b/metrics/prometheus.go index c9660082d..d7236ce9d 100644 --- a/metrics/prometheus.go +++ b/metrics/prometheus.go @@ -1,12 +1,14 @@ package metrics import ( + "context" "net/http" "sort" "strings" "sync" "github.com/containous/mux" + "github.com/containous/traefik/config" "github.com/containous/traefik/log" "github.com/containous/traefik/safe" "github.com/containous/traefik/types" @@ -60,17 +62,17 @@ var promState = newPrometheusState() // PrometheusHandler exposes Prometheus routes. type PrometheusHandler struct{} -// AddRoutes adds Prometheus routes on a router. -func (h PrometheusHandler) AddRoutes(router *mux.Router) { +// Append adds Prometheus routes on a router. +func (h PrometheusHandler) Append(router *mux.Router) { router.Methods(http.MethodGet).Path("/metrics").Handler(promhttp.Handler()) } // RegisterPrometheus registers all Prometheus metrics. // It must be called only once and failing to register the metrics will lead to a panic. 
-func RegisterPrometheus(config *types.Prometheus) Registry { +func RegisterPrometheus(ctx context.Context, config *types.Prometheus) Registry { standardRegistry := initStandardRegistry(config) - if !registerPromState() { + if !registerPromState(ctx) { return nil } @@ -172,13 +174,14 @@ func initStandardRegistry(config *types.Prometheus) Registry { } } -func registerPromState() bool { +func registerPromState(ctx context.Context) bool { if err := stdprometheus.Register(promState); err != nil { + logger := log.FromContext(ctx) if _, ok := err.(stdprometheus.AlreadyRegisteredError); !ok { - log.Errorf("Unable to register Traefik to Prometheus: %v", err) + logger.Errorf("Unable to register Traefik to Prometheus: %v", err) return false } - log.Debug("Prometheus collector already registered.") + logger.Debug("Prometheus collector already registered.") } return true } @@ -186,23 +189,24 @@ func registerPromState() bool { // OnConfigurationUpdate receives the current configuration from Traefik. // It then converts the configuration to the optimized package internal format // and sets it to the promState. -func OnConfigurationUpdate(configurations types.Configurations) { +func OnConfigurationUpdate(configurations config.Configurations) { dynamicConfig := newDynamicConfig() - for _, config := range configurations { - for _, frontend := range config.Frontends { - for _, entrypointName := range frontend.EntryPoints { - dynamicConfig.entrypoints[entrypointName] = true - } - } - - for backendName, backend := range config.Backends { - dynamicConfig.backends[backendName] = make(map[string]bool) - for _, server := range backend.Servers { - dynamicConfig.backends[backendName][server.URL] = true - } - } - } + // FIXME metrics + // for _, config := range configurations { + // for _, frontend := range config.Frontends { + // for _, entrypointName := range frontend.EntryPoints { + // dynamicConfig.entrypoints[entrypointName] = true + // } + // } + // + // for backendName, backend := range config.Backends { + // dynamicConfig.backends[backendName] = make(map[string]bool) + // for _, server := range backend.Servers { + // dynamicConfig.backends[backendName][server.URL] = true + // } + // } + // } promState.SetDynamicConfig(dynamicConfig) } diff --git a/metrics/prometheus_test.go b/metrics/prometheus_test.go index 520795faa..18631b36a 100644 --- a/metrics/prometheus_test.go +++ b/metrics/prometheus_test.go @@ -1,12 +1,14 @@ package metrics import ( + "context" "fmt" "net/http" "strconv" "testing" "time" + "github.com/containous/traefik/config" th "github.com/containous/traefik/testhelpers" "github.com/containous/traefik/types" "github.com/prometheus/client_golang/prometheus" @@ -69,8 +71,7 @@ func TestRegisterPromState(t *testing.T) { initStandardRegistry(prom) } - promReg := registerPromState() - if promReg != false { + if registerPromState(context.Background()) { actualNbRegistries++ } @@ -101,7 +102,7 @@ func TestPrometheus(t *testing.T) { // Reset state of global promState. defer promState.reset() - prometheusRegistry := RegisterPrometheus(&types.Prometheus{}) + prometheusRegistry := RegisterPrometheus(context.Background(), &types.Prometheus{}) defer prometheus.Unregister(promState) if !prometheusRegistry.IsEnabled() { @@ -266,21 +267,27 @@ func TestPrometheus(t *testing.T) { } func TestPrometheusMetricRemoval(t *testing.T) { + // FIXME metrics + t.Skip("waiting for metrics") + // Reset state of global promState. 
defer promState.reset() - prometheusRegistry := RegisterPrometheus(&types.Prometheus{}) + prometheusRegistry := RegisterPrometheus(context.Background(), &types.Prometheus{}) defer prometheus.Unregister(promState) - configurations := make(types.Configurations) + configurations := make(config.Configurations) configurations["providerName"] = th.BuildConfiguration( - th.WithFrontends( - th.WithFrontend("backend1", th.WithEntryPoints("entrypoint1")), + th.WithRouters( + th.WithRouter("foo", + th.WithServiceName("bar")), ), - th.WithBackends( - th.WithBackendNew("backend1", th.WithServersNew(th.WithServerNew("http://localhost:9000"))), + th.WithLoadBalancerServices(th.WithService("bar", + th.WithLBMethod("wrr"), + th.WithServers(th.WithServer("http://localhost:9000"))), ), ) + OnConfigurationUpdate(configurations) // Register some metrics manually that are not part of the active configuration. @@ -321,7 +328,7 @@ func TestPrometheusRemovedMetricsReset(t *testing.T) { // Reset state of global promState. defer promState.reset() - prometheusRegistry := RegisterPrometheus(&types.Prometheus{}) + prometheusRegistry := RegisterPrometheus(context.Background(), &types.Prometheus{}) defer prometheus.Unregister(promState) labelNamesValues := []string{ diff --git a/metrics/statsd.go b/metrics/statsd.go index 464067c58..ac10a5d2d 100644 --- a/metrics/statsd.go +++ b/metrics/statsd.go @@ -1,6 +1,7 @@ package metrics import ( + "context" "time" "github.com/containous/traefik/log" @@ -11,7 +12,7 @@ import ( ) var statsdClient = statsd.New("traefik.", kitlog.LoggerFunc(func(keyvals ...interface{}) error { - log.Info(keyvals) + log.WithoutContext().WithField(log.MetricsProviderName, "statsd").Info(keyvals) return nil })) @@ -33,9 +34,9 @@ const ( ) // RegisterStatsd registers the metrics pusher if this didn't happen yet and creates a statsd Registry instance. 
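
The StatsD registry follows the same pattern; a sketch taken from the updated test (note that an empty Address falls back to localhost:8125 in initStatsdTicker):

    statsdRegistry := metrics.RegisterStatsd(context.Background(), &types.Statsd{Address: ":18125", PushInterval: "1s"})
    defer metrics.StopStatsd()
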
-func RegisterStatsd(config *types.Statsd) Registry { +func RegisterStatsd(ctx context.Context, config *types.Statsd) Registry { if statsdTicker == nil { - statsdTicker = initStatsdTicker(config) + statsdTicker = initStatsdTicker(ctx, config) } return &standardRegistry{ @@ -56,14 +57,14 @@ func RegisterStatsd(config *types.Statsd) Registry { } // initStatsdTicker initializes metrics pusher and creates a statsdClient if not created already -func initStatsdTicker(config *types.Statsd) *time.Ticker { +func initStatsdTicker(ctx context.Context, config *types.Statsd) *time.Ticker { address := config.Address if len(address) == 0 { address = "localhost:8125" } pushInterval, err := time.ParseDuration(config.PushInterval) if err != nil { - log.Warnf("Unable to parse %s into pushInterval, using 10s as default value", config.PushInterval) + log.FromContext(ctx).Warnf("Unable to parse %s from config.PushInterval: using 10s as the default value", config.PushInterval) pushInterval = 10 * time.Second } diff --git a/metrics/statsd_test.go b/metrics/statsd_test.go index b742db660..cc5409930 100644 --- a/metrics/statsd_test.go +++ b/metrics/statsd_test.go @@ -1,6 +1,7 @@ package metrics import ( + "context" "net/http" "testing" "time" @@ -14,7 +15,7 @@ func TestStatsD(t *testing.T) { // This is needed to make sure that UDP Listener listens for data a bit longer, otherwise it will quit after a millisecond udp.Timeout = 5 * time.Second - statsdRegistry := RegisterStatsd(&types.Statsd{Address: ":18125", PushInterval: "1s"}) + statsdRegistry := RegisterStatsd(context.Background(), &types.Statsd{Address: ":18125", PushInterval: "1s"}) defer StopStatsd() if !statsdRegistry.IsEnabled() { diff --git a/middlewares/accesslog/capture_response_writer.go b/middlewares/accesslog/capture_response_writer.go index 0892b3980..58fd368c4 100644 --- a/middlewares/accesslog/capture_response_writer.go +++ b/middlewares/accesslog/capture_response_writer.go @@ -6,7 +6,7 @@ import ( "net" "net/http" - "github.com/containous/traefik/middlewares" + "github.com/containous/traefik/old/middlewares" ) var ( diff --git a/middlewares/accesslog/field_middleware.go b/middlewares/accesslog/field_middleware.go new file mode 100644 index 000000000..b154f431e --- /dev/null +++ b/middlewares/accesslog/field_middleware.go @@ -0,0 +1,59 @@ +package accesslog + +import ( + "net/http" + "time" + + "github.com/vulcand/oxy/utils" +) + +// FieldApply function hook to add data in accesslog +type FieldApply func(rw http.ResponseWriter, r *http.Request, next http.Handler, data *LogData) + +// FieldHandler sends a new field to the logger. +type FieldHandler struct { + next http.Handler + name string + value string + applyFn FieldApply +} + +// NewFieldHandler creates a Field handler. 
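
A sketch of how the new field middleware is meant to be chained around a service handler (serviceHandler and the names are placeholders):

    // serviceHandler is the HTTP handler of the service itself (placeholder).
    handler := accesslog.NewFieldHandler(serviceHandler, accesslog.ServiceName, "my-service", accesslog.AddServiceFields)
    // Without an apply function the field is simply recorded in the log data table.
    handler = accesslog.NewFieldHandler(handler, accesslog.RouterName, "my-router", nil)
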
+func NewFieldHandler(next http.Handler, name string, value string, applyFn FieldApply) http.Handler { + return &FieldHandler{next: next, name: name, value: value, applyFn: applyFn} +} + +func (f *FieldHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + table := GetLogData(req) + if table == nil { + f.next.ServeHTTP(rw, req) + return + } + + table.Core[f.name] = f.value + + if f.applyFn != nil { + f.applyFn(rw, req, f.next, table) + } else { + f.next.ServeHTTP(rw, req) + } +} + +// AddServiceFields add service fields +func AddServiceFields(rw http.ResponseWriter, req *http.Request, next http.Handler, data *LogData) { + data.Core[ServiceURL] = req.URL // note that this is *not* the original incoming URL + data.Core[ServiceAddr] = req.URL.Host + + crw := &captureResponseWriter{rw: rw} + start := time.Now().UTC() + + next.ServeHTTP(crw, req) + + // use UTC to handle switchover of daylight saving correctly + data.Core[OriginDuration] = time.Now().UTC().Sub(start) + data.Core[OriginStatus] = crw.Status() + // make copy of headers so we can ensure there is no subsequent mutation during response processing + data.OriginResponse = make(http.Header) + utils.CopyHeaders(data.OriginResponse, crw.Header()) + data.Core[OriginContentSize] = crw.Size() +} diff --git a/middlewares/accesslog/logdata.go b/middlewares/accesslog/logdata.go index e20e753fe..c21e1c2f3 100644 --- a/middlewares/accesslog/logdata.go +++ b/middlewares/accesslog/logdata.go @@ -12,14 +12,16 @@ const ( // Duration is the map key used for the total time taken by processing the response, including the origin server's time but // not the log writing time. Duration = "Duration" - // FrontendName is the map key used for the name of the Traefik frontend. - FrontendName = "FrontendName" - // BackendName is the map key used for the name of the Traefik backend. - BackendName = "BackendName" - // BackendURL is the map key used for the URL of the Traefik backend. - BackendURL = "BackendURL" - // BackendAddr is the map key used for the IP:port of the Traefik backend (extracted from BackendURL) - BackendAddr = "BackendAddr" + + // RouterName is the map key used for the name of the Traefik router. + RouterName = "RouterName" + // ServiceName is the map key used for the name of the Traefik backend. + ServiceName = "ServiceName" + // ServiceURL is the map key used for the URL of the Traefik backend. + ServiceURL = "ServiceURL" + // ServiceAddr is the map key used for the IP:port of the Traefik backend (extracted from BackendURL) + ServiceAddr = "ServiceAddr" + // ClientAddr is the map key used for the remote address in its original form (usually IP:port). ClientAddr = "ClientAddr" // ClientHost is the map key used for the remote IP address from which the client request was received. 
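
With the frontend/backend keys renamed above, code that annotates the access-log data table now looks like this sketch (values are illustrative; note that GetLogData can return nil):

    if logData := accesslog.GetLogData(req); logData != nil {
        logData.Core[accesslog.RouterName] = "my-router"
        logData.Core[accesslog.ServiceURL] = "http://127.0.0.1:8080"
    }
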
@@ -72,9 +74,9 @@ const ( var defaultCoreKeys = [...]string{ StartUTC, Duration, - FrontendName, - BackendName, - BackendURL, + RouterName, + ServiceName, + ServiceURL, ClientHost, ClientPort, ClientUsername, @@ -99,7 +101,7 @@ func init() { for _, k := range defaultCoreKeys { allCoreKeys[k] = struct{}{} } - allCoreKeys[BackendAddr] = struct{}{} + allCoreKeys[ServiceAddr] = struct{}{} allCoreKeys[ClientAddr] = struct{}{} allCoreKeys[RequestAddr] = struct{}{} allCoreKeys[GzipRatio] = struct{}{} diff --git a/middlewares/accesslog/logger.go b/middlewares/accesslog/logger.go index 922011a23..ab157574a 100644 --- a/middlewares/accesslog/logger.go +++ b/middlewares/accesslog/logger.go @@ -12,6 +12,7 @@ import ( "sync/atomic" "time" + "github.com/containous/alice" "github.com/containous/flaeg/parse" "github.com/containous/traefik/log" "github.com/containous/traefik/types" @@ -21,36 +22,44 @@ import ( type key string const ( - // DataTableKey is the key within the request context used to - // store the Log Data Table + // DataTableKey is the key within the request context used to store the Log Data Table. DataTableKey key = "LogDataTable" - // CommonFormat is the common logging format (CLF) + // CommonFormat is the common logging format (CLF). CommonFormat string = "common" - // JSONFormat is the JSON logging format + // JSONFormat is the JSON logging format. JSONFormat string = "json" ) -type logHandlerParams struct { +type handlerParams struct { logDataTable *LogData crr *captureRequestReader crw *captureResponseWriter } -// LogHandler will write each request and its response to the access log. -type LogHandler struct { +// Handler will write each request and its response to the access log. +type Handler struct { config *types.AccessLog logger *logrus.Logger file *os.File mu sync.Mutex httpCodeRanges types.HTTPCodeRanges - logHandlerChan chan logHandlerParams + logHandlerChan chan handlerParams wg sync.WaitGroup } -// NewLogHandler creates a new LogHandler -func NewLogHandler(config *types.AccessLog) (*LogHandler, error) { +// WrapHandler Wraps access log handler into an Alice Constructor. +func WrapHandler(handler *Handler) alice.Constructor { + return func(next http.Handler) (http.Handler, error) { + return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + handler.ServeHTTP(rw, req, next.ServeHTTP) + }), nil + } +} + +// NewHandler creates a new Handler. 
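
A hedged sketch of building the renamed access-log handler and turning it into an alice constructor with WrapHandler (the file path and the inner handler are placeholders):

    accessLogger, err := accesslog.NewHandler(&types.AccessLog{FilePath: "/var/log/traefik/access.log", Format: accesslog.CommonFormat})
    if err != nil {
        return err
    }
    // WrapHandler yields an alice.Constructor, i.e. func(http.Handler) (http.Handler, error).
    handler, err := accesslog.WrapHandler(accessLogger)(next) // next is the inner http.Handler (placeholder)
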
+func NewHandler(config *types.AccessLog) (*Handler, error) { file := os.Stdout if len(config.FilePath) > 0 { f, err := openAccessLogFile(config.FilePath) @@ -59,7 +68,7 @@ func NewLogHandler(config *types.AccessLog) (*LogHandler, error) { } file = f } - logHandlerChan := make(chan logHandlerParams, config.BufferingSize) + logHandlerChan := make(chan handlerParams, config.BufferingSize) var formatter logrus.Formatter @@ -79,7 +88,7 @@ func NewLogHandler(config *types.AccessLog) (*LogHandler, error) { Level: logrus.InfoLevel, } - logHandler := &LogHandler{ + logHandler := &Handler{ config: config, logger: logger, file: file, @@ -88,7 +97,7 @@ func NewLogHandler(config *types.AccessLog) (*LogHandler, error) { if config.Filters != nil { if httpCodeRanges, err := types.NewHTTPCodeRanges(config.Filters.StatusCodes); err != nil { - log.Errorf("Failed to create new HTTP code ranges: %s", err) + log.WithoutContext().Errorf("Failed to create new HTTP code ranges: %s", err) } else { logHandler.httpCodeRanges = httpCodeRanges } @@ -122,17 +131,16 @@ func openAccessLogFile(filePath string) (*os.File, error) { return file, nil } -// GetLogDataTable gets the request context object that contains logging data. +// GetLogData gets the request context object that contains logging data. // This creates data as the request passes through the middleware chain. -func GetLogDataTable(req *http.Request) *LogData { +func GetLogData(req *http.Request) *LogData { if ld, ok := req.Context().Value(DataTableKey).(*LogData); ok { return ld } - log.Errorf("%s is nil", DataTableKey) - return &LogData{Core: make(CoreLogData)} + return nil } -func (l *LogHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request, next http.HandlerFunc) { +func (h *Handler) ServeHTTP(rw http.ResponseWriter, req *http.Request, next http.HandlerFunc) { now := time.Now().UTC() core := CoreLogData{ @@ -179,46 +187,45 @@ func (l *LogHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request, next h next.ServeHTTP(crw, reqWithDataTable) - core[ClientUsername] = formatUsernameForLog(core[ClientUsername]) + core[ClientUsername] = usernameIfPresent(reqWithDataTable.URL) logDataTable.DownstreamResponse = crw.Header() - if l.config.BufferingSize > 0 { - l.logHandlerChan <- logHandlerParams{ + if h.config.BufferingSize > 0 { + h.logHandlerChan <- handlerParams{ logDataTable: logDataTable, crr: crr, crw: crw, } } else { - l.logTheRoundTrip(logDataTable, crr, crw) + h.logTheRoundTrip(logDataTable, crr, crw) } } // Close closes the Logger (i.e. the file, drain logHandlerChan, etc). -func (l *LogHandler) Close() error { - close(l.logHandlerChan) - l.wg.Wait() - return l.file.Close() +func (h *Handler) Close() error { + close(h.logHandlerChan) + h.wg.Wait() + return h.file.Close() } -// Rotate closes and reopens the log file to allow for rotation -// by an external source. -func (l *LogHandler) Rotate() error { +// Rotate closes and reopens the log file to allow for rotation by an external source. 
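
Rotation is driven externally; a sketch of wiring it to a signal (the SIGUSR1 handling is an assumption, not part of this change; accessLogger is the handler built above):

    sig := make(chan os.Signal, 1)
    signal.Notify(sig, syscall.SIGUSR1)
    go func() {
        for range sig {
            if err := accessLogger.Rotate(); err != nil {
                log.WithoutContext().Errorf("Error rotating access log: %v", err)
            }
        }
    }()
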
+func (h *Handler) Rotate() error { var err error - if l.file != nil { + if h.file != nil { defer func(f *os.File) { f.Close() - }(l.file) + }(h.file) } - l.file, err = os.OpenFile(l.config.FilePath, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664) + h.file, err = os.OpenFile(h.config.FilePath, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664) if err != nil { return err } - l.mu.Lock() - defer l.mu.Unlock() - l.logger.Out = l.file + h.mu.Lock() + defer h.mu.Unlock() + h.logger.Out = h.file return nil } @@ -230,16 +237,17 @@ func silentSplitHostPort(value string) (host string, port string) { return host, port } -func formatUsernameForLog(usernameField interface{}) string { - username, ok := usernameField.(string) - if ok && len(username) != 0 { - return username +func usernameIfPresent(theURL *url.URL) string { + if theURL.User != nil { + if name := theURL.User.Username(); name != "" { + return name + } } return "-" } -// Logging handler to log frontend name, backend name, and elapsed time -func (l *LogHandler) logTheRoundTrip(logDataTable *LogData, crr *captureRequestReader, crw *captureResponseWriter) { +// Logging handler to log frontend name, backend name, and elapsed time. +func (h *Handler) logTheRoundTrip(logDataTable *LogData, crr *captureRequestReader, crw *captureResponseWriter) { core := logDataTable.Core retryAttempts, ok := core[RetryAttempts].(int) @@ -254,11 +262,11 @@ func (l *LogHandler) logTheRoundTrip(logDataTable *LogData, crr *captureRequestR core[DownstreamStatus] = crw.Status() - // n.b. take care to perform time arithmetic using UTC to avoid errors at DST boundaries + // n.b. take care to perform time arithmetic using UTC to avoid errors at DST boundaries. totalDuration := time.Now().UTC().Sub(core[StartUTC].(time.Time)) core[Duration] = totalDuration - if l.keepAccessLog(crw.Status(), retryAttempts, totalDuration) { + if h.keepAccessLog(crw.Status(), retryAttempts, totalDuration) { core[DownstreamContentSize] = crw.Size() if original, ok := core[OriginContentSize]; ok { o64 := original.(int64) @@ -275,24 +283,24 @@ func (l *LogHandler) logTheRoundTrip(logDataTable *LogData, crr *captureRequestR fields := logrus.Fields{} for k, v := range logDataTable.Core { - if l.config.Fields.Keep(k) { + if h.config.Fields.Keep(k) { fields[k] = v } } - l.redactHeaders(logDataTable.Request, fields, "request_") - l.redactHeaders(logDataTable.OriginResponse, fields, "origin_") - l.redactHeaders(logDataTable.DownstreamResponse, fields, "downstream_") + h.redactHeaders(logDataTable.Request, fields, "request_") + h.redactHeaders(logDataTable.OriginResponse, fields, "origin_") + h.redactHeaders(logDataTable.DownstreamResponse, fields, "downstream_") - l.mu.Lock() - defer l.mu.Unlock() - l.logger.WithFields(fields).Println() + h.mu.Lock() + defer h.mu.Unlock() + h.logger.WithFields(fields).Println() } } -func (l *LogHandler) redactHeaders(headers http.Header, fields logrus.Fields, prefix string) { +func (h *Handler) redactHeaders(headers http.Header, fields logrus.Fields, prefix string) { for k := range headers { - v := l.config.Fields.KeepHeader(k) + v := h.config.Fields.KeepHeader(k) if v == types.AccessLogKeep { fields[prefix+k] = headers.Get(k) } else if v == types.AccessLogRedact { @@ -301,26 +309,26 @@ func (l *LogHandler) redactHeaders(headers http.Header, fields logrus.Fields, pr } } -func (l *LogHandler) keepAccessLog(statusCode, retryAttempts int, duration time.Duration) bool { - if l.config.Filters == nil { +func (h *Handler) keepAccessLog(statusCode, retryAttempts int, duration time.Duration) 
bool { + if h.config.Filters == nil { // no filters were specified return true } - if len(l.httpCodeRanges) == 0 && !l.config.Filters.RetryAttempts && l.config.Filters.MinDuration == 0 { + if len(h.httpCodeRanges) == 0 && !h.config.Filters.RetryAttempts && h.config.Filters.MinDuration == 0 { // empty filters were specified, e.g. by passing --accessLog.filters only (without other filter options) return true } - if l.httpCodeRanges.Contains(statusCode) { + if h.httpCodeRanges.Contains(statusCode) { return true } - if l.config.Filters.RetryAttempts && retryAttempts > 0 { + if h.config.Filters.RetryAttempts && retryAttempts > 0 { return true } - if l.config.Filters.MinDuration > 0 && (parse.Duration(duration) > l.config.Filters.MinDuration) { + if h.config.Filters.MinDuration > 0 && (parse.Duration(duration) > h.config.Filters.MinDuration) { return true } diff --git a/middlewares/accesslog/logger_formatters.go b/middlewares/accesslog/logger_formatters.go index 4755079fe..6e17b17e6 100644 --- a/middlewares/accesslog/logger_formatters.go +++ b/middlewares/accesslog/logger_formatters.go @@ -8,16 +8,16 @@ import ( "github.com/sirupsen/logrus" ) -// default format for time presentation +// default format for time presentation. const ( commonLogTimeFormat = "02/Jan/2006:15:04:05 -0700" defaultValue = "-" ) -// CommonLogFormatter provides formatting in the Traefik common log format +// CommonLogFormatter provides formatting in the Traefik common log format. type CommonLogFormatter struct{} -// Format formats the log entry in the Traefik common log format +// Format formats the log entry in the Traefik common log format. func (f *CommonLogFormatter) Format(entry *logrus.Entry) ([]byte, error) { b := &bytes.Buffer{} @@ -43,8 +43,8 @@ func (f *CommonLogFormatter) Format(entry *logrus.Entry) ([]byte, error) { toLog(entry.Data, "request_Referer", `"-"`, true), toLog(entry.Data, "request_User-Agent", `"-"`, true), toLog(entry.Data, RequestCount, defaultValue, true), - toLog(entry.Data, FrontendName, defaultValue, true), - toLog(entry.Data, BackendURL, defaultValue, true), + toLog(entry.Data, RouterName, defaultValue, true), + toLog(entry.Data, ServiceURL, defaultValue, true), elapsedMillis) return b.Bytes(), err @@ -70,6 +70,7 @@ func toLog(fields logrus.Fields, key string, defaultValue string, quoted bool) i return defaultValue } + func toLogEntry(s string, defaultValue string, quote bool) string { if len(s) == 0 { return defaultValue diff --git a/middlewares/accesslog/logger_formatters_test.go b/middlewares/accesslog/logger_formatters_test.go index 22b68da58..54be292ed 100644 --- a/middlewares/accesslog/logger_formatters_test.go +++ b/middlewares/accesslog/logger_formatters_test.go @@ -32,8 +32,8 @@ func TestCommonLogFormatter_Format(t *testing.T) { RequestRefererHeader: "", RequestUserAgentHeader: "", RequestCount: 0, - FrontendName: "", - BackendURL: "", + RouterName: "", + ServiceURL: "", }, expectedLog: `10.0.0.1 - Client [10/Nov/2009:23:00:00 +0000] "GET /foo http" - - "-" "-" 0 - - 123000ms `, @@ -53,8 +53,8 @@ func TestCommonLogFormatter_Format(t *testing.T) { RequestRefererHeader: "referer", RequestUserAgentHeader: "agent", RequestCount: nil, - FrontendName: "foo", - BackendURL: "http://10.0.0.2/toto", + RouterName: "foo", + ServiceURL: "http://10.0.0.2/toto", }, expectedLog: `10.0.0.1 - Client [10/Nov/2009:23:00:00 +0000] "GET /foo http" 123 132 "referer" "agent" - "foo" "http://10.0.0.2/toto" 123000ms `, diff --git a/middlewares/accesslog/logger_test.go b/middlewares/accesslog/logger_test.go 
index 9809458f8..9ac42af91 100644 --- a/middlewares/accesslog/logger_test.go +++ b/middlewares/accesslog/logger_test.go @@ -15,7 +15,6 @@ import ( "time" "github.com/containous/flaeg/parse" - "github.com/containous/traefik/log" "github.com/containous/traefik/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -24,8 +23,8 @@ import ( var ( logFileNameSuffix = "/traefik/logger/test.log" testContent = "Hello, World" - testBackendName = "http://127.0.0.1/testBackend" - testFrontendName = "testFrontend" + testServiceName = "http://127.0.0.1/testService" + testRouterName = "testRouter" testStatus = 123 testContentSize int64 = 12 testHostname = "TestHost" @@ -50,7 +49,7 @@ func TestLogRotation(t *testing.T) { rotatedFileName := fileName + ".rotated" config := &types.AccessLog{FilePath: fileName, Format: CommonFormat} - logHandler, err := NewLogHandler(config) + logHandler, err := NewHandler(config) if err != nil { t.Fatalf("Error creating new log handler: %s", err) } @@ -129,7 +128,7 @@ func TestLoggerCLF(t *testing.T) { logData, err := ioutil.ReadFile(logFilePath) require.NoError(t, err) - expectedLog := ` TestHost - TestUser [13/Apr/2016:07:14:19 -0700] "POST testpath HTTP/0.0" 123 12 "testReferer" "testUserAgent" 1 "testFrontend" "http://127.0.0.1/testBackend" 1ms` + expectedLog := ` TestHost - TestUser [13/Apr/2016:07:14:19 -0700] "POST testpath HTTP/0.0" 123 12 "testReferer" "testUserAgent" 1 "testRouter" "http://127.0.0.1/testService" 1ms` assertValidLogData(t, expectedLog, logData) } @@ -144,7 +143,7 @@ func TestAsyncLoggerCLF(t *testing.T) { logData, err := ioutil.ReadFile(logFilePath) require.NoError(t, err) - expectedLog := ` TestHost - TestUser [13/Apr/2016:07:14:19 -0700] "POST testpath HTTP/0.0" 123 12 "testReferer" "testUserAgent" 1 "testFrontend" "http://127.0.0.1/testBackend" 1ms` + expectedLog := ` TestHost - TestUser [13/Apr/2016:07:14:19 -0700] "POST testpath HTTP/0.0" 123 12 "testReferer" "testUserAgent" 1 "testRouter" "http://127.0.0.1/testService" 1ms` assertValidLogData(t, expectedLog, logData) } @@ -156,11 +155,11 @@ func assertString(exp string) func(t *testing.T, actual interface{}) { } } -func assertNotEqual(exp string) func(t *testing.T, actual interface{}) { +func assertNotEmpty() func(t *testing.T, actual interface{}) { return func(t *testing.T, actual interface{}) { t.Helper() - assert.NotEqual(t, exp, actual) + assert.NotEqual(t, "", actual) } } @@ -205,8 +204,8 @@ func TestLoggerJSON(t *testing.T) { OriginStatus: assertFloat64(float64(testStatus)), RequestRefererHeader: assertString(testReferer), RequestUserAgentHeader: assertString(testUserAgent), - FrontendName: assertString(testFrontendName), - BackendURL: assertString(testBackendName), + RouterName: assertString(testRouterName), + ServiceURL: assertString(testServiceName), ClientUsername: assertString(testUsername), ClientHost: assertString(testHostname), ClientPort: assertString(fmt.Sprintf("%d", testPort)), @@ -218,9 +217,9 @@ func TestLoggerJSON(t *testing.T) { Duration: assertFloat64NotZero(), Overhead: assertFloat64NotZero(), RetryAttempts: assertFloat64(float64(testRetryAttempts)), - "time": assertNotEqual(""), - "StartLocal": assertNotEqual(""), - "StartUTC": assertNotEqual(""), + "time": assertNotEmpty(), + "StartLocal": assertNotEmpty(), + "StartUTC": assertNotEmpty(), }, }, { @@ -235,7 +234,7 @@ func TestLoggerJSON(t *testing.T) { expected: map[string]func(t *testing.T, value interface{}){ "level": assertString("info"), "msg": assertString(""), - "time": 
assertNotEqual(""), + "time": assertNotEmpty(), "downstream_Content-Type": assertString("text/plain; charset=utf-8"), RequestRefererHeader: assertString(testReferer), RequestUserAgentHeader: assertString(testUserAgent), @@ -256,7 +255,7 @@ func TestLoggerJSON(t *testing.T) { expected: map[string]func(t *testing.T, value interface{}){ "level": assertString("info"), "msg": assertString(""), - "time": assertNotEqual(""), + "time": assertNotEmpty(), }, }, { @@ -274,7 +273,7 @@ func TestLoggerJSON(t *testing.T) { expected: map[string]func(t *testing.T, value interface{}){ "level": assertString("info"), "msg": assertString(""), - "time": assertNotEqual(""), + "time": assertNotEmpty(), "downstream_Content-Type": assertString("REDACTED"), RequestRefererHeader: assertString("REDACTED"), RequestUserAgentHeader: assertString("REDACTED"), @@ -302,7 +301,7 @@ func TestLoggerJSON(t *testing.T) { RequestHost: assertString(testHostname), "level": assertString("info"), "msg": assertString(""), - "time": assertNotEqual(""), + "time": assertNotEmpty(), RequestRefererHeader: assertString(testReferer), }, }, @@ -349,7 +348,7 @@ func TestNewLogHandlerOutputStdout(t *testing.T) { FilePath: "", Format: CommonFormat, }, - expectedLog: `TestHost - TestUser [13/Apr/2016:07:14:19 -0700] "POST testpath HTTP/0.0" 123 12 "testReferer" "testUserAgent" 23 "testFrontend" "http://127.0.0.1/testBackend" 1ms`, + expectedLog: `TestHost - TestUser [13/Apr/2016:07:14:19 -0700] "POST testpath HTTP/0.0" 123 12 "testReferer" "testUserAgent" 23 "testRouter" "http://127.0.0.1/testService" 1ms`, }, { desc: "default config with empty filters", @@ -358,7 +357,7 @@ func TestNewLogHandlerOutputStdout(t *testing.T) { Format: CommonFormat, Filters: &types.AccessLogFilters{}, }, - expectedLog: `TestHost - TestUser [13/Apr/2016:07:14:19 -0700] "POST testpath HTTP/0.0" 123 12 "testReferer" "testUserAgent" 23 "testFrontend" "http://127.0.0.1/testBackend" 1ms`, + expectedLog: `TestHost - TestUser [13/Apr/2016:07:14:19 -0700] "POST testpath HTTP/0.0" 123 12 "testReferer" "testUserAgent" 23 "testRouter" "http://127.0.0.1/testService" 1ms`, }, { desc: "Status code filter not matching", @@ -380,7 +379,7 @@ func TestNewLogHandlerOutputStdout(t *testing.T) { StatusCodes: []string{"123"}, }, }, - expectedLog: `TestHost - TestUser [13/Apr/2016:07:14:19 -0700] "POST testpath HTTP/0.0" 123 12 "testReferer" "testUserAgent" 23 "testFrontend" "http://127.0.0.1/testBackend" 1ms`, + expectedLog: `TestHost - TestUser [13/Apr/2016:07:14:19 -0700] "POST testpath HTTP/0.0" 123 12 "testReferer" "testUserAgent" 23 "testRouter" "http://127.0.0.1/testService" 1ms`, }, { desc: "Duration filter not matching", @@ -402,7 +401,7 @@ func TestNewLogHandlerOutputStdout(t *testing.T) { MinDuration: parse.Duration(1 * time.Millisecond), }, }, - expectedLog: `TestHost - TestUser [13/Apr/2016:07:14:19 -0700] "POST testpath HTTP/0.0" 123 12 "testReferer" "testUserAgent" 23 "testFrontend" "http://127.0.0.1/testBackend" 1ms`, + expectedLog: `TestHost - TestUser [13/Apr/2016:07:14:19 -0700] "POST testpath HTTP/0.0" 123 12 "testReferer" "testUserAgent" 23 "testRouter" "http://127.0.0.1/testService" 1ms`, }, { desc: "Retry attempts filter matching", @@ -413,7 +412,7 @@ func TestNewLogHandlerOutputStdout(t *testing.T) { RetryAttempts: true, }, }, - expectedLog: `TestHost - TestUser [13/Apr/2016:07:14:19 -0700] "POST testpath HTTP/0.0" 123 12 "testReferer" "testUserAgent" 23 "testFrontend" "http://127.0.0.1/testBackend" 1ms`, + expectedLog: `TestHost - TestUser [13/Apr/2016:07:14:19 
-0700] "POST testpath HTTP/0.0" 123 12 "testReferer" "testUserAgent" 23 "testRouter" "http://127.0.0.1/testService" 1ms`, }, { desc: "Default mode keep", @@ -424,7 +423,7 @@ func TestNewLogHandlerOutputStdout(t *testing.T) { DefaultMode: "keep", }, }, - expectedLog: `TestHost - TestUser [13/Apr/2016:07:14:19 -0700] "POST testpath HTTP/0.0" 123 12 "testReferer" "testUserAgent" 23 "testFrontend" "http://127.0.0.1/testBackend" 1ms`, + expectedLog: `TestHost - TestUser [13/Apr/2016:07:14:19 -0700] "POST testpath HTTP/0.0" 123 12 "testReferer" "testUserAgent" 23 "testRouter" "http://127.0.0.1/testService" 1ms`, }, { desc: "Default mode keep with override", @@ -438,7 +437,7 @@ func TestNewLogHandlerOutputStdout(t *testing.T) { }, }, }, - expectedLog: `- - TestUser [13/Apr/2016:07:14:19 -0700] "POST testpath HTTP/0.0" 123 12 "testReferer" "testUserAgent" 23 "testFrontend" "http://127.0.0.1/testBackend" 1ms`, + expectedLog: `- - TestUser [13/Apr/2016:07:14:19 -0700] "POST testpath HTTP/0.0" 123 12 "testReferer" "testUserAgent" 23 "testRouter" "http://127.0.0.1/testService" 1ms`, }, { desc: "Default mode drop", @@ -572,8 +571,8 @@ func assertValidLogData(t *testing.T, expected string, logData []byte) { assert.Equal(t, resultExpected[RequestRefererHeader], result[RequestRefererHeader], formatErrMessage) assert.Equal(t, resultExpected[RequestUserAgentHeader], result[RequestUserAgentHeader], formatErrMessage) assert.Regexp(t, regexp.MustCompile("[0-9]*"), result[RequestCount], formatErrMessage) - assert.Equal(t, resultExpected[FrontendName], result[FrontendName], formatErrMessage) - assert.Equal(t, resultExpected[BackendURL], result[BackendURL], formatErrMessage) + assert.Equal(t, resultExpected[RouterName], result[RouterName], formatErrMessage) + assert.Equal(t, resultExpected[ServiceURL], result[ServiceURL], formatErrMessage) assert.Regexp(t, regexp.MustCompile("[0-9]*ms"), result[Duration], formatErrMessage) } @@ -599,7 +598,7 @@ func createTempDir(t *testing.T, prefix string) string { } func doLogging(t *testing.T, config *types.AccessLog) { - logger, err := NewLogHandler(config) + logger, err := NewHandler(config) require.NoError(t, err) defer logger.Close() @@ -618,6 +617,7 @@ func doLogging(t *testing.T, config *types.AccessLog) { Method: testMethod, RemoteAddr: fmt.Sprintf("%s:%d", testHostname, testPort), URL: &url.URL{ + User: url.UserPassword(testUsername, ""), Path: testPath, }, } @@ -627,18 +627,23 @@ func doLogging(t *testing.T, config *types.AccessLog) { func logWriterTestHandlerFunc(rw http.ResponseWriter, r *http.Request) { if _, err := rw.Write([]byte(testContent)); err != nil { - log.Error(err) + http.Error(rw, err.Error(), http.StatusInternalServerError) + return + } + + logData := GetLogData(r) + if logData != nil { + logData.Core[RouterName] = testRouterName + logData.Core[ServiceURL] = testServiceName + logData.Core[OriginStatus] = testStatus + logData.Core[OriginContentSize] = testContentSize + logData.Core[RetryAttempts] = testRetryAttempts + logData.Core[StartUTC] = testStart.UTC() + logData.Core[StartLocal] = testStart.Local() + } else { + http.Error(rw, "LogData is nil", http.StatusInternalServerError) + return } rw.WriteHeader(testStatus) - - logDataTable := GetLogDataTable(r) - logDataTable.Core[FrontendName] = testFrontendName - logDataTable.Core[BackendURL] = testBackendName - logDataTable.Core[OriginStatus] = testStatus - logDataTable.Core[OriginContentSize] = testContentSize - logDataTable.Core[RetryAttempts] = testRetryAttempts - logDataTable.Core[StartUTC] = 
testStart.UTC() - logDataTable.Core[StartLocal] = testStart.Local() - logDataTable.Core[ClientUsername] = testUsername } diff --git a/middlewares/accesslog/parser.go b/middlewares/accesslog/parser.go index c2931d153..281a4bbb9 100644 --- a/middlewares/accesslog/parser.go +++ b/middlewares/accesslog/parser.go @@ -45,8 +45,8 @@ func ParseAccessLog(data string) (map[string]string, error) { result[RequestRefererHeader] = submatch[9] result[RequestUserAgentHeader] = submatch[10] result[RequestCount] = submatch[11] - result[FrontendName] = submatch[12] - result[BackendURL] = submatch[13] + result[RouterName] = submatch[12] + result[ServiceURL] = submatch[13] result[Duration] = submatch[14] } diff --git a/middlewares/accesslog/parser_test.go b/middlewares/accesslog/parser_test.go index 701fed4c3..1712f1a8c 100644 --- a/middlewares/accesslog/parser_test.go +++ b/middlewares/accesslog/parser_test.go @@ -14,7 +14,7 @@ func TestParseAccessLog(t *testing.T) { }{ { desc: "full log", - value: `TestHost - TestUser [13/Apr/2016:07:14:19 -0700] "POST testpath HTTP/0.0" 123 12 "testReferer" "testUserAgent" 1 "testFrontend" "http://127.0.0.1/testBackend" 1ms`, + value: `TestHost - TestUser [13/Apr/2016:07:14:19 -0700] "POST testpath HTTP/0.0" 123 12 "testReferer" "testUserAgent" 1 "testRouter" "http://127.0.0.1/testService" 1ms`, expected: map[string]string{ ClientHost: "TestHost", ClientUsername: "TestUser", @@ -27,14 +27,14 @@ func TestParseAccessLog(t *testing.T) { RequestRefererHeader: `"testReferer"`, RequestUserAgentHeader: `"testUserAgent"`, RequestCount: "1", - FrontendName: `"testFrontend"`, - BackendURL: `"http://127.0.0.1/testBackend"`, + RouterName: `"testRouter"`, + ServiceURL: `"http://127.0.0.1/testService"`, Duration: "1ms", }, }, { desc: "log with space", - value: `127.0.0.1 - - [09/Mar/2018:10:51:32 +0000] "GET / HTTP/1.1" 401 17 "-" "Go-http-client/1.1" 1 "testFrontend with space" - 0ms`, + value: `127.0.0.1 - - [09/Mar/2018:10:51:32 +0000] "GET / HTTP/1.1" 401 17 "-" "Go-http-client/1.1" 1 "testRouter with space" - 0ms`, expected: map[string]string{ ClientHost: "127.0.0.1", ClientUsername: "-", @@ -47,8 +47,8 @@ func TestParseAccessLog(t *testing.T) { RequestRefererHeader: `"-"`, RequestUserAgentHeader: `"Go-http-client/1.1"`, RequestCount: "1", - FrontendName: `"testFrontend with space"`, - BackendURL: `-`, + RouterName: `"testRouter with space"`, + ServiceURL: `-`, Duration: "0ms", }, }, diff --git a/middlewares/accesslog/save_retries.go b/middlewares/accesslog/save_retries.go index 56b19a14b..595727300 100644 --- a/middlewares/accesslog/save_retries.go +++ b/middlewares/accesslog/save_retries.go @@ -14,6 +14,8 @@ func (s *SaveRetries) Retried(req *http.Request, attempt int) { attempt-- } - table := GetLogDataTable(req) - table.Core[RetryAttempts] = attempt + table := GetLogData(req) + if table != nil { + table.Core[RetryAttempts] = attempt + } } diff --git a/middlewares/addprefix/add_prefix.go b/middlewares/addprefix/add_prefix.go new file mode 100644 index 000000000..993760559 --- /dev/null +++ b/middlewares/addprefix/add_prefix.go @@ -0,0 +1,62 @@ +package addprefix + +import ( + "context" + "fmt" + "net/http" + + "github.com/containous/traefik/config" + "github.com/containous/traefik/middlewares" + "github.com/containous/traefik/tracing" + "github.com/opentracing/opentracing-go/ext" +) + +const ( + typeName = "AddPrefix" +) + +// AddPrefix is a middleware used to add prefix to an URL request. 
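
A minimal sketch of constructing the new addprefix middleware, following the tests below (names are illustrative):

    next := http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {})
    handler, err := addprefix.New(context.Background(), next, config.AddPrefix{Prefix: "/foo"}, "foo-add-prefix")
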
+type addPrefix struct { + next http.Handler + prefix string + name string +} + +// New creates a new handler. +func New(ctx context.Context, next http.Handler, config config.AddPrefix, name string) (http.Handler, error) { + middlewares.GetLogger(ctx, name, typeName).Debug("Creating middleware") + var result *addPrefix + + if len(config.Prefix) > 0 { + result = &addPrefix{ + prefix: config.Prefix, + next: next, + name: name, + } + } else { + return nil, fmt.Errorf("prefix cannot be empty") + } + + return result, nil +} + +func (ap *addPrefix) GetTracingInformation() (string, ext.SpanKindEnum) { + return ap.name, tracing.SpanKindNoneEnum +} + +func (ap *addPrefix) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + logger := middlewares.GetLogger(req.Context(), ap.name, typeName) + + oldURLPath := req.URL.Path + req.URL.Path = ap.prefix + req.URL.Path + logger.Debugf("URL.Path is now %s (was %s).", req.URL.Path, oldURLPath) + + if req.URL.RawPath != "" { + oldURLRawPath := req.URL.RawPath + req.URL.RawPath = ap.prefix + req.URL.RawPath + logger.Debugf("URL.RawPath is now %s (was %s).", req.URL.RawPath, oldURLRawPath) + } + req.RequestURI = req.URL.RequestURI() + + ap.next.ServeHTTP(rw, req) +} diff --git a/middlewares/addprefix/add_prefix_test.go b/middlewares/addprefix/add_prefix_test.go new file mode 100644 index 000000000..ba4fff6cb --- /dev/null +++ b/middlewares/addprefix/add_prefix_test.go @@ -0,0 +1,104 @@ +package addprefix + +import ( + "context" + "net/http" + "testing" + + "github.com/containous/traefik/config" + "github.com/containous/traefik/testhelpers" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNewAddPrefix(t *testing.T) { + testCases := []struct { + desc string + prefix config.AddPrefix + expectsError bool + }{ + { + desc: "Works with a non empty prefix", + prefix: config.AddPrefix{Prefix: "/a"}, + }, + { + desc: "Fails if prefix is empty", + prefix: config.AddPrefix{Prefix: ""}, + expectsError: true, + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}) + + _, err := New(context.Background(), next, test.prefix, "foo-add-prefix") + if test.expectsError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestAddPrefix(t *testing.T) { + logrus.SetLevel(logrus.DebugLevel) + testCases := []struct { + desc string + prefix config.AddPrefix + path string + expectedPath string + expectedRawPath string + }{ + { + desc: "Works with a regular path", + prefix: config.AddPrefix{Prefix: "/a"}, + path: "/b", + expectedPath: "/a/b", + }, + { + desc: "Works with a raw path", + prefix: config.AddPrefix{Prefix: "/a"}, + path: "/b%2Fc", + expectedPath: "/a/b/c", + expectedRawPath: "/a/b%2Fc", + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + var actualPath, actualRawPath, requestURI string + + next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + actualPath = r.URL.Path + actualRawPath = r.URL.RawPath + requestURI = r.RequestURI + }) + + req := testhelpers.MustNewRequest(http.MethodGet, "http://localhost"+test.path, nil) + + handler, err := New(context.Background(), next, test.prefix, "foo-add-prefix") + require.NoError(t, err) + + handler.ServeHTTP(nil, req) + + assert.Equal(t, test.expectedPath, actualPath) + assert.Equal(t, test.expectedRawPath, 
actualRawPath) + + expectedURI := test.expectedPath + if test.expectedRawPath != "" { + // go HTTP uses the raw path when existent in the RequestURI + expectedURI = test.expectedRawPath + } + assert.Equal(t, expectedURI, requestURI) + }) + } +} diff --git a/middlewares/auth/auth.go b/middlewares/auth/auth.go new file mode 100644 index 000000000..c2373f0e5 --- /dev/null +++ b/middlewares/auth/auth.go @@ -0,0 +1,65 @@ +package auth + +import ( + "io/ioutil" + "strings" +) + +// UserParser Parses a string and return a userName/userHash. An error if the format of the string is incorrect. +type UserParser func(user string) (string, string, error) + +const ( + defaultRealm = "traefik" + authorizationHeader = "Authorization" +) + +func getUsers(fileName string, appendUsers []string, parser UserParser) (map[string]string, error) { + users, err := loadUsers(fileName, appendUsers) + if err != nil { + return nil, err + } + + userMap := make(map[string]string) + for _, user := range users { + userName, userHash, err := parser(user) + if err != nil { + return nil, err + } + userMap[userName] = userHash + } + + return userMap, nil +} + +func loadUsers(fileName string, appendUsers []string) ([]string, error) { + var users []string + var err error + + if fileName != "" { + users, err = getLinesFromFile(fileName) + if err != nil { + return nil, err + } + } + + return append(users, appendUsers...), nil +} + +func getLinesFromFile(filename string) ([]string, error) { + dat, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + + // Trim lines and filter out blanks + rawLines := strings.Split(string(dat), "\n") + var filteredLines []string + for _, rawLine := range rawLines { + line := strings.TrimSpace(rawLine) + if line != "" { + filteredLines = append(filteredLines, line) + } + } + + return filteredLines, nil +} diff --git a/middlewares/auth/basic_auth.go b/middlewares/auth/basic_auth.go new file mode 100644 index 000000000..13bed3741 --- /dev/null +++ b/middlewares/auth/basic_auth.go @@ -0,0 +1,102 @@ +package auth + +import ( + "context" + "fmt" + "net/http" + "net/url" + "strings" + + goauth "github.com/abbot/go-http-auth" + "github.com/containous/traefik/config" + "github.com/containous/traefik/middlewares" + "github.com/containous/traefik/middlewares/accesslog" + "github.com/containous/traefik/tracing" + "github.com/opentracing/opentracing-go/ext" +) + +const ( + basicTypeName = "BasicAuth" +) + +type basicAuth struct { + next http.Handler + auth *goauth.BasicAuth + users map[string]string + headerField string + removeHeader bool + name string +} + +// NewBasic creates a basicAuth middleware. 
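
And a sketch of the new basic-auth constructor, mirroring the tests below (the htpasswd entry is the one used in those tests; next is a placeholder handler):

    authConfig := config.BasicAuth{Users: []string{"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/"}}
    handler, err := auth.NewBasic(context.Background(), next, authConfig, "my-basic-auth")
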
+func NewBasic(ctx context.Context, next http.Handler, authConfig config.BasicAuth, name string) (http.Handler, error) { + middlewares.GetLogger(ctx, name, basicTypeName).Debug("Creating middleware") + users, err := getUsers(authConfig.UsersFile, authConfig.Users, basicUserParser) + if err != nil { + return nil, err + } + + ba := &basicAuth{ + next: next, + users: users, + headerField: authConfig.HeaderField, + removeHeader: authConfig.RemoveHeader, + name: name, + } + + realm := defaultRealm + if len(authConfig.Realm) > 0 { + realm = authConfig.Realm + } + ba.auth = goauth.NewBasicAuthenticator(realm, ba.secretBasic) + + return ba, nil +} + +func (b *basicAuth) GetTracingInformation() (string, ext.SpanKindEnum) { + return b.name, tracing.SpanKindNoneEnum +} + +func (b *basicAuth) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + logger := middlewares.GetLogger(req.Context(), b.name, basicTypeName) + + if username := b.auth.CheckAuth(req); username == "" { + logger.Debug("Authentication failed") + tracing.SetErrorWithEvent(req, "Authentication failed") + b.auth.RequireAuth(rw, req) + } else { + logger.Debug("Authentication succeeded") + req.URL.User = url.User(username) + + logData := accesslog.GetLogData(req) + if logData != nil { + logData.Core[accesslog.ClientUsername] = username + } + + if b.headerField != "" { + req.Header[b.headerField] = []string{username} + } + + if b.removeHeader { + logger.Debug("Removing authorization header") + req.Header.Del(authorizationHeader) + } + b.next.ServeHTTP(rw, req) + } +} + +func (b *basicAuth) secretBasic(user, realm string) string { + if secret, ok := b.users[user]; ok { + return secret + } + + return "" +} + +func basicUserParser(user string) (string, string, error) { + split := strings.Split(user, ":") + if len(split) != 2 { + return "", "", fmt.Errorf("error parsing BasicUser: %v", user) + } + return split[0], split[1], nil +} diff --git a/middlewares/auth/basic_auth_test.go b/middlewares/auth/basic_auth_test.go new file mode 100644 index 000000000..917bc59b7 --- /dev/null +++ b/middlewares/auth/basic_auth_test.go @@ -0,0 +1,274 @@ +package auth + +import ( + "context" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "testing" + + "github.com/containous/traefik/config" + "github.com/containous/traefik/testhelpers" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestBasicAuthFail(t *testing.T) { + next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, "traefik") + }) + + auth := config.BasicAuth{ + Users: []string{"test"}, + } + _, err := NewBasic(context.Background(), next, auth, "authName") + require.Error(t, err) + + auth2 := config.BasicAuth{ + Users: []string{"test:test"}, + } + authMiddleware, err := NewBasic(context.Background(), next, auth2, "authTest") + require.NoError(t, err) + + ts := httptest.NewServer(authMiddleware) + defer ts.Close() + + req := testhelpers.MustNewRequest(http.MethodGet, ts.URL, nil) + req.SetBasicAuth("test", "test") + + res, err := http.DefaultClient.Do(req) + require.NoError(t, err) + + assert.Equal(t, http.StatusUnauthorized, res.StatusCode, "they should be equal") +} + +func TestBasicAuthSuccess(t *testing.T) { + next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, "traefik") + }) + + auth := config.BasicAuth{ + Users: []string{"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/"}, + } + authMiddleware, err := NewBasic(context.Background(), next, auth, "authName") + 
require.NoError(t, err) + + ts := httptest.NewServer(authMiddleware) + defer ts.Close() + + req := testhelpers.MustNewRequest(http.MethodGet, ts.URL, nil) + req.SetBasicAuth("test", "test") + + res, err := http.DefaultClient.Do(req) + require.NoError(t, err) + + assert.Equal(t, http.StatusOK, res.StatusCode, "they should be equal") + + body, err := ioutil.ReadAll(res.Body) + require.NoError(t, err) + defer res.Body.Close() + + assert.Equal(t, "traefik\n", string(body), "they should be equal") +} + +func TestBasicAuthUserHeader(t *testing.T) { + next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "test", r.Header["X-Webauth-User"][0], "auth user should be set") + fmt.Fprintln(w, "traefik") + }) + + auth := config.BasicAuth{ + Users: []string{"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/"}, + HeaderField: "X-Webauth-User", + } + middleware, err := NewBasic(context.Background(), next, auth, "authName") + require.NoError(t, err) + + ts := httptest.NewServer(middleware) + defer ts.Close() + + req := testhelpers.MustNewRequest(http.MethodGet, ts.URL, nil) + req.SetBasicAuth("test", "test") + + res, err := http.DefaultClient.Do(req) + require.NoError(t, err) + + assert.Equal(t, http.StatusOK, res.StatusCode) + + body, err := ioutil.ReadAll(res.Body) + require.NoError(t, err) + defer res.Body.Close() + + assert.Equal(t, "traefik\n", string(body)) +} + +func TestBasicAuthHeaderRemoved(t *testing.T) { + next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + assert.Empty(t, r.Header.Get(authorizationHeader)) + fmt.Fprintln(w, "traefik") + }) + + auth := config.BasicAuth{ + RemoveHeader: true, + Users: []string{"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/"}, + } + middleware, err := NewBasic(context.Background(), next, auth, "authName") + require.NoError(t, err) + + ts := httptest.NewServer(middleware) + defer ts.Close() + + req := testhelpers.MustNewRequest(http.MethodGet, ts.URL, nil) + req.SetBasicAuth("test", "test") + + res, err := http.DefaultClient.Do(req) + require.NoError(t, err) + + assert.Equal(t, http.StatusOK, res.StatusCode) + + body, err := ioutil.ReadAll(res.Body) + require.NoError(t, err) + err = res.Body.Close() + require.NoError(t, err) + + assert.Equal(t, "traefik\n", string(body)) +} + +func TestBasicAuthHeaderPresent(t *testing.T) { + next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + assert.NotEmpty(t, r.Header.Get(authorizationHeader)) + fmt.Fprintln(w, "traefik") + }) + + auth := config.BasicAuth{ + Users: []string{"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/"}, + } + middleware, err := NewBasic(context.Background(), next, auth, "authName") + require.NoError(t, err) + + ts := httptest.NewServer(middleware) + defer ts.Close() + + req := testhelpers.MustNewRequest(http.MethodGet, ts.URL, nil) + req.SetBasicAuth("test", "test") + + res, err := http.DefaultClient.Do(req) + require.NoError(t, err) + + assert.Equal(t, http.StatusOK, res.StatusCode) + + body, err := ioutil.ReadAll(res.Body) + require.NoError(t, err) + err = res.Body.Close() + require.NoError(t, err) + + assert.Equal(t, "traefik\n", string(body)) +} + +func TestBasicAuthUsersFromFile(t *testing.T) { + testCases := []struct { + desc string + userFileContent string + expectedUsers map[string]string + givenUsers []string + realm string + }{ + { + desc: "Finds the users in the file", + userFileContent: "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/\ntest2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0\n", + givenUsers: []string{}, + expectedUsers: 
map[string]string{"test": "test", "test2": "test2"}, + }, + { + desc: "Merges given users with users from the file", + userFileContent: "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/\n", + givenUsers: []string{"test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0", "test3:$apr1$3rJbDP0q$RfzJiorTk78jQ1EcKqWso0"}, + expectedUsers: map[string]string{"test": "test", "test2": "test2", "test3": "test3"}, + }, + { + desc: "Given users have priority over users in the file", + userFileContent: "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/\ntest2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0\n", + givenUsers: []string{"test2:$apr1$mK.GtItK$ncnLYvNLek0weXdxo68690"}, + expectedUsers: map[string]string{"test": "test", "test2": "overridden"}, + }, + { + desc: "Should authenticate the correct user based on the realm", + userFileContent: "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/\ntest2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0\n", + givenUsers: []string{"test2:$apr1$mK.GtItK$ncnLYvNLek0weXdxo68690"}, + expectedUsers: map[string]string{"test": "test", "test2": "overridden"}, + realm: "trafikee", + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + // Creates the temporary configuration file with the users + usersFile, err := ioutil.TempFile("", "auth-users") + require.NoError(t, err) + defer os.Remove(usersFile.Name()) + + _, err = usersFile.Write([]byte(test.userFileContent)) + require.NoError(t, err) + + // Creates the configuration for our Authenticator + authenticatorConfiguration := config.BasicAuth{ + Users: test.givenUsers, + UsersFile: usersFile.Name(), + Realm: test.realm, + } + + next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, "traefik") + }) + + authenticator, err := NewBasic(context.Background(), next, authenticatorConfiguration, "authName") + require.NoError(t, err) + + ts := httptest.NewServer(authenticator) + defer ts.Close() + + for userName, userPwd := range test.expectedUsers { + req := testhelpers.MustNewRequest(http.MethodGet, ts.URL, nil) + req.SetBasicAuth(userName, userPwd) + + var res *http.Response + res, err = http.DefaultClient.Do(req) + require.NoError(t, err) + + require.Equal(t, http.StatusOK, res.StatusCode, "Cannot authenticate user "+userName) + + var body []byte + body, err = ioutil.ReadAll(res.Body) + require.NoError(t, err) + err = res.Body.Close() + require.NoError(t, err) + + require.Equal(t, "traefik\n", string(body)) + } + + // Checks that user foo doesn't work + req := testhelpers.MustNewRequest(http.MethodGet, ts.URL, nil) + req.SetBasicAuth("foo", "foo") + + res, err := http.DefaultClient.Do(req) + require.NoError(t, err) + + require.Equal(t, http.StatusUnauthorized, res.StatusCode) + if len(test.realm) > 0 { + require.Equal(t, `Basic realm="`+test.realm+`"`, res.Header.Get("WWW-Authenticate")) + } + + body, err := ioutil.ReadAll(res.Body) + require.NoError(t, err) + err = res.Body.Close() + require.NoError(t, err) + + require.NotContains(t, "traefik", string(body)) + }) + } +} diff --git a/middlewares/auth/digest_auth.go b/middlewares/auth/digest_auth.go new file mode 100644 index 000000000..090285d46 --- /dev/null +++ b/middlewares/auth/digest_auth.go @@ -0,0 +1,102 @@ +package auth + +import ( + "context" + "fmt" + "net/http" + "net/url" + "strings" + + goauth "github.com/abbot/go-http-auth" + "github.com/containous/traefik/config" + "github.com/containous/traefik/middlewares" + "github.com/containous/traefik/middlewares/accesslog" + "github.com/containous/traefik/tracing" + 
"github.com/opentracing/opentracing-go/ext" +) + +const ( + digestTypeName = "digestAuth" +) + +type digestAuth struct { + next http.Handler + auth *goauth.DigestAuth + users map[string]string + headerField string + removeHeader bool + name string +} + +// NewDigest creates a digest auth middleware. +func NewDigest(ctx context.Context, next http.Handler, authConfig config.DigestAuth, name string) (http.Handler, error) { + middlewares.GetLogger(ctx, name, digestTypeName).Debug("Creating middleware") + users, err := getUsers(authConfig.UsersFile, authConfig.Users, digestUserParser) + if err != nil { + return nil, err + } + + da := &digestAuth{ + next: next, + users: users, + headerField: authConfig.HeaderField, + removeHeader: authConfig.RemoveHeader, + name: name, + } + + realm := defaultRealm + if len(authConfig.Realm) > 0 { + realm = authConfig.Realm + } + da.auth = goauth.NewDigestAuthenticator(realm, da.secretDigest) + + return da, nil +} + +func (d *digestAuth) GetTracingInformation() (string, ext.SpanKindEnum) { + return d.name, tracing.SpanKindNoneEnum +} + +func (d *digestAuth) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + logger := middlewares.GetLogger(req.Context(), d.name, digestTypeName) + + if username, _ := d.auth.CheckAuth(req); username == "" { + logger.Debug("Digest authentication failed") + tracing.SetErrorWithEvent(req, "Digest authentication failed") + d.auth.RequireAuth(rw, req) + } else { + logger.Debug("Digest authentication succeeded") + req.URL.User = url.User(username) + + logData := accesslog.GetLogData(req) + if logData != nil { + logData.Core[accesslog.ClientUsername] = username + } + + if d.headerField != "" { + req.Header[d.headerField] = []string{username} + } + + if d.removeHeader { + logger.Debug("Removing the Authorization header") + req.Header.Del(authorizationHeader) + } + d.next.ServeHTTP(rw, req) + } +} + +func (d *digestAuth) secretDigest(user, realm string) string { + if secret, ok := d.users[user+":"+realm]; ok { + return secret + } + + return "" +} + +func digestUserParser(user string) (string, string, error) { + split := strings.Split(user, ":") + if len(split) != 3 { + return "", "", fmt.Errorf("error parsing DigestUser: %v", user) + } + return split[0] + ":" + split[1], split[2], nil +} diff --git a/middlewares/auth/digest_auth_request_test.go b/middlewares/auth/digest_auth_request_test.go new file mode 100644 index 000000000..c9dff7d1e --- /dev/null +++ b/middlewares/auth/digest_auth_request_test.go @@ -0,0 +1,141 @@ +package auth + +import ( + "crypto/md5" + "crypto/rand" + "encoding/hex" + "fmt" + "io" + "net/http" + "strings" +) + +const ( + algorithm = "algorithm" + authorization = "Authorization" + nonce = "nonce" + opaque = "opaque" + qop = "qop" + realm = "realm" + wwwAuthenticate = "Www-Authenticate" +) + +// DigestRequest is a client for digest authentication requests +type digestRequest struct { + client *http.Client + username, password string + nonceCount nonceCount +} + +type nonceCount int + +func (nc nonceCount) String() string { + return fmt.Sprintf("%08x", int(nc)) +} + +var wanted = []string{algorithm, nonce, opaque, qop, realm} + +// New makes a DigestRequest instance +func newDigestRequest(username, password string, client *http.Client) *digestRequest { + return &digestRequest{ + client: client, + username: username, + password: password, + } +} + +// Do does requests as http.Do does +func (r *digestRequest) Do(req *http.Request) (*http.Response, error) { + parts, err := r.makeParts(req) + if err != nil { + 
return nil, err + } + + if parts != nil { + req.Header.Set(authorization, r.makeAuthorization(req, parts)) + } + + return r.client.Do(req) +} + +func (r *digestRequest) makeParts(req *http.Request) (map[string]string, error) { + authReq, err := http.NewRequest(req.Method, req.URL.String(), nil) + if err != nil { + return nil, err + } + resp, err := r.client.Do(authReq) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusUnauthorized { + return nil, nil + } + + if len(resp.Header[wwwAuthenticate]) == 0 { + return nil, fmt.Errorf("headers do not have %s", wwwAuthenticate) + } + + headers := strings.Split(resp.Header[wwwAuthenticate][0], ",") + parts := make(map[string]string, len(wanted)) + for _, r := range headers { + for _, w := range wanted { + if strings.Contains(r, w) { + parts[w] = strings.Split(r, `"`)[1] + } + } + } + + if len(parts) != len(wanted) { + return nil, fmt.Errorf("header is invalid: %+v", parts) + } + + return parts, nil +} + +func getMD5(texts []string) string { + h := md5.New() + _, _ = io.WriteString(h, strings.Join(texts, ":")) + return hex.EncodeToString(h.Sum(nil)) +} + +func (r *digestRequest) getNonceCount() string { + r.nonceCount++ + return r.nonceCount.String() +} + +func (r *digestRequest) makeAuthorization(req *http.Request, parts map[string]string) string { + ha1 := getMD5([]string{r.username, parts[realm], r.password}) + ha2 := getMD5([]string{req.Method, req.URL.String()}) + cnonce := generateRandom(16) + nc := r.getNonceCount() + response := getMD5([]string{ + ha1, + parts[nonce], + nc, + cnonce, + parts[qop], + ha2, + }) + return fmt.Sprintf( + `Digest username="%s", realm="%s", nonce="%s", uri="%s", algorithm=%s, qop=%s, nc=%s, cnonce="%s", response="%s", opaque="%s"`, + r.username, + parts[realm], + parts[nonce], + req.URL.String(), + parts[algorithm], + parts[qop], + nc, + cnonce, + response, + parts[opaque], + ) +} + +// GenerateRandom generates random string +func generateRandom(n int) string { + b := make([]byte, 8) + _, _ = io.ReadFull(rand.Reader, b) + return fmt.Sprintf("%x", b)[:n] +} diff --git a/middlewares/auth/digest_auth_test.go b/middlewares/auth/digest_auth_test.go new file mode 100644 index 000000000..b2a6e9840 --- /dev/null +++ b/middlewares/auth/digest_auth_test.go @@ -0,0 +1,156 @@ +package auth + +import ( + "context" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "testing" + + "github.com/containous/traefik/config" + "github.com/containous/traefik/testhelpers" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestDigestAuthError(t *testing.T) { + next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, "traefik") + }) + + auth := config.DigestAuth{ + Users: []string{"test"}, + } + _, err := NewDigest(context.Background(), next, auth, "authName") + assert.Error(t, err) +} + +func TestDigestAuthFail(t *testing.T) { + next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, "traefik") + }) + + auth := config.DigestAuth{ + Users: []string{"test:traefik:a2688e031edb4be6a3797f3882655c05"}, + } + authMiddleware, err := NewDigest(context.Background(), next, auth, "authName") + require.NoError(t, err) + assert.NotNil(t, authMiddleware, "this should not be nil") + + ts := httptest.NewServer(authMiddleware) + defer ts.Close() + + client := http.DefaultClient + req := testhelpers.MustNewRequest(http.MethodGet, ts.URL, nil) + req.SetBasicAuth("test", "test") + + res, err 
:= client.Do(req) + require.NoError(t, err) + + assert.Equal(t, http.StatusUnauthorized, res.StatusCode) +} + +func TestDigestAuthUsersFromFile(t *testing.T) { + testCases := []struct { + desc string + userFileContent string + expectedUsers map[string]string + givenUsers []string + realm string + }{ + { + desc: "Finds the users in the file", + userFileContent: "test:traefik:a2688e031edb4be6a3797f3882655c05\ntest2:traefik:518845800f9e2bfb1f1f740ec24f074e\n", + givenUsers: []string{}, + expectedUsers: map[string]string{"test": "test", "test2": "test2"}, + }, + { + desc: "Merges given users with users from the file", + userFileContent: "test:traefik:a2688e031edb4be6a3797f3882655c05\n", + givenUsers: []string{"test2:traefik:518845800f9e2bfb1f1f740ec24f074e", "test3:traefik:c8e9f57ce58ecb4424407f665a91646c"}, + expectedUsers: map[string]string{"test": "test", "test2": "test2", "test3": "test3"}, + }, + { + desc: "Given users have priority over users in the file", + userFileContent: "test:traefik:a2688e031edb4be6a3797f3882655c05\ntest2:traefik:518845800f9e2bfb1f1f740ec24f074e\n", + givenUsers: []string{"test2:traefik:8de60a1c52da68ccf41f0c0ffb7c51a0"}, + expectedUsers: map[string]string{"test": "test", "test2": "overridden"}, + }, + { + desc: "Should authenticate the correct user based on the realm", + userFileContent: "test:traefik:a2688e031edb4be6a3797f3882655c05\ntest:traefikee:316a669c158c8b7ab1048b03961a7aa5\n", + givenUsers: []string{}, + expectedUsers: map[string]string{"test": "test2"}, + realm: "traefikee", + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + // Creates the temporary configuration file with the users + usersFile, err := ioutil.TempFile("", "auth-users") + require.NoError(t, err) + defer os.Remove(usersFile.Name()) + + _, err = usersFile.Write([]byte(test.userFileContent)) + require.NoError(t, err) + + // Creates the configuration for our Authenticator + authenticatorConfiguration := config.DigestAuth{ + Users: test.givenUsers, + UsersFile: usersFile.Name(), + Realm: test.realm, + } + + next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, "traefik") + }) + + authenticator, err := NewDigest(context.Background(), next, authenticatorConfiguration, "authName") + require.NoError(t, err) + + ts := httptest.NewServer(authenticator) + defer ts.Close() + + for userName, userPwd := range test.expectedUsers { + req := testhelpers.MustNewRequest(http.MethodGet, ts.URL, nil) + digestRequest := newDigestRequest(userName, userPwd, http.DefaultClient) + + var res *http.Response + res, err = digestRequest.Do(req) + require.NoError(t, err) + require.Equal(t, http.StatusOK, res.StatusCode, "Cannot authenticate user "+userName) + + var body []byte + body, err = ioutil.ReadAll(res.Body) + require.NoError(t, err) + err = res.Body.Close() + require.NoError(t, err) + + require.Equal(t, "traefik\n", string(body)) + } + + // Checks that user foo doesn't work + req := testhelpers.MustNewRequest(http.MethodGet, ts.URL, nil) + digestRequest := newDigestRequest("foo", "foo", http.DefaultClient) + + var res *http.Response + res, err = digestRequest.Do(req) + require.NoError(t, err) + require.Equal(t, http.StatusUnauthorized, res.StatusCode) + + var body []byte + body, err = ioutil.ReadAll(res.Body) + require.NoError(t, err) + err = res.Body.Close() + require.NoError(t, err) + + require.NotContains(t, "traefik", string(body)) + }) + } +} diff --git a/middlewares/auth/forward.go 
b/middlewares/auth/forward.go index 8c62f6a7c..ad310eea1 100644 --- a/middlewares/auth/forward.go +++ b/middlewares/auth/forward.go @@ -1,25 +1,68 @@ package auth import ( + "context" + "crypto/tls" + "fmt" "io/ioutil" "net" "net/http" "strings" - "github.com/containous/traefik/log" - "github.com/containous/traefik/middlewares/tracing" - "github.com/containous/traefik/types" + "github.com/containous/traefik/config" + "github.com/containous/traefik/middlewares" + "github.com/containous/traefik/tracing" + "github.com/opentracing/opentracing-go/ext" "github.com/vulcand/oxy/forward" "github.com/vulcand/oxy/utils" ) const ( - xForwardedURI = "X-Forwarded-Uri" - xForwardedMethod = "X-Forwarded-Method" + xForwardedURI = "X-Forwarded-Uri" + xForwardedMethod = "X-Forwarded-Method" + forwardedTypeName = "ForwardedAuthType" ) -// Forward the authentication to a external server -func Forward(config *types.Forward, w http.ResponseWriter, r *http.Request, next http.HandlerFunc) { +type forwardAuth struct { + address string + authResponseHeaders []string + next http.Handler + name string + tlsConfig *tls.Config + trustForwardHeader bool +} + +// NewForward creates a forward auth middleware. +func NewForward(ctx context.Context, next http.Handler, config config.ForwardAuth, name string) (http.Handler, error) { + middlewares.GetLogger(ctx, name, forwardedTypeName).Debug("Creating middleware") + + fa := &forwardAuth{ + address: config.Address, + authResponseHeaders: config.AuthResponseHeaders, + next: next, + name: name, + trustForwardHeader: config.TrustForwardHeader, + } + + if config.TLS != nil { + tlsConfig, err := config.TLS.CreateTLSConfig() + if err != nil { + return nil, err + } + + fa.tlsConfig = tlsConfig + } + + return fa, nil +} + +func (fa *forwardAuth) GetTracingInformation() (string, ext.SpanKindEnum) { + return fa.name, ext.SpanKindRPCClientEnum +} + +func (fa *forwardAuth) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + logger := middlewares.GetLogger(req.Context(), fa.name, forwardedTypeName) + // Ensure our request client does not follow redirects httpClient := http.Client{ CheckRedirect: func(r *http.Request, via []*http.Request) error { @@ -27,42 +70,44 @@ func Forward(config *types.Forward, w http.ResponseWriter, r *http.Request, next }, } - if config.TLS != nil { - tlsConfig, err := config.TLS.CreateTLSConfig() - if err != nil { - tracing.SetErrorAndDebugLog(r, "Unable to configure TLS to call %s. Cause %s", config.Address, err) - w.WriteHeader(http.StatusInternalServerError) - return - } - + if fa.tlsConfig != nil { httpClient.Transport = &http.Transport{ - TLSClientConfig: tlsConfig, + TLSClientConfig: fa.tlsConfig, } } - forwardReq, err := http.NewRequest(http.MethodGet, config.Address, http.NoBody) - tracing.LogRequest(tracing.GetSpan(r), forwardReq) + forwardReq, err := http.NewRequest(http.MethodGet, fa.address, nil) + tracing.LogRequest(tracing.GetSpan(req), forwardReq) if err != nil { - tracing.SetErrorAndDebugLog(r, "Error calling %s. Cause %s", config.Address, err) - w.WriteHeader(http.StatusInternalServerError) + logMessage := fmt.Sprintf("Error calling %s. 
Cause %s", fa.address, err) + logger.Debug(logMessage) + tracing.SetErrorWithEvent(req, logMessage) + + rw.WriteHeader(http.StatusInternalServerError) return } - writeHeader(r, forwardReq, config.TrustForwardHeader) + writeHeader(req, forwardReq, fa.trustForwardHeader) tracing.InjectRequestHeaders(forwardReq) forwardResponse, forwardErr := httpClient.Do(forwardReq) if forwardErr != nil { - tracing.SetErrorAndDebugLog(r, "Error calling %s. Cause: %s", config.Address, forwardErr) - w.WriteHeader(http.StatusInternalServerError) + logMessage := fmt.Sprintf("Error calling %s. Cause: %s", fa.address, forwardErr) + logger.Debug(logMessage) + tracing.SetErrorWithEvent(req, logMessage) + + rw.WriteHeader(http.StatusInternalServerError) return } body, readError := ioutil.ReadAll(forwardResponse.Body) if readError != nil { - tracing.SetErrorAndDebugLog(r, "Error reading body %s. Cause: %s", config.Address, readError) - w.WriteHeader(http.StatusInternalServerError) + logMessage := fmt.Sprintf("Error reading body %s. Cause: %s", fa.address, readError) + logger.Debug(logMessage) + tracing.SetErrorWithEvent(req, logMessage) + + rw.WriteHeader(http.StatusInternalServerError) return } defer forwardResponse.Body.Close() @@ -70,40 +115,43 @@ func Forward(config *types.Forward, w http.ResponseWriter, r *http.Request, next // Pass the forward response's body and selected headers if it // didn't return a response within the range of [200, 300). if forwardResponse.StatusCode < http.StatusOK || forwardResponse.StatusCode >= http.StatusMultipleChoices { - log.Debugf("Remote error %s. StatusCode: %d", config.Address, forwardResponse.StatusCode) + logger.Debugf("Remote error %s. StatusCode: %d", fa.address, forwardResponse.StatusCode) - utils.CopyHeaders(w.Header(), forwardResponse.Header) - utils.RemoveHeaders(w.Header(), forward.HopHeaders...) + utils.CopyHeaders(rw.Header(), forwardResponse.Header) + utils.RemoveHeaders(rw.Header(), forward.HopHeaders...) // Grab the location header, if any. redirectURL, err := forwardResponse.Location() if err != nil { if err != http.ErrNoLocation { - tracing.SetErrorAndDebugLog(r, "Error reading response location header %s. Cause: %s", config.Address, err) - w.WriteHeader(http.StatusInternalServerError) + logMessage := fmt.Sprintf("Error reading response location header %s. Cause: %s", fa.address, err) + logger.Debug(logMessage) + tracing.SetErrorWithEvent(req, logMessage) + + rw.WriteHeader(http.StatusInternalServerError) return } } else if redirectURL.String() != "" { // Set the location in our response if one was sent back. 
- w.Header().Set("Location", redirectURL.String()) + rw.Header().Set("Location", redirectURL.String()) } - tracing.LogResponseCode(tracing.GetSpan(r), forwardResponse.StatusCode) - w.WriteHeader(forwardResponse.StatusCode) + tracing.LogResponseCode(tracing.GetSpan(req), forwardResponse.StatusCode) + rw.WriteHeader(forwardResponse.StatusCode) - if _, err = w.Write(body); err != nil { - log.Error(err) + if _, err = rw.Write(body); err != nil { + logger.Error(err) } return } - for _, headerName := range config.AuthResponseHeaders { - r.Header.Set(headerName, forwardResponse.Header.Get(headerName)) + for _, headerName := range fa.authResponseHeaders { + req.Header.Set(headerName, forwardResponse.Header.Get(headerName)) } - r.RequestURI = r.URL.RequestURI() - next(w, r) + req.RequestURI = req.URL.RequestURI() + fa.next.ServeHTTP(rw, req) } func writeHeader(req *http.Request, forwardReq *http.Request, trustForwardHeader bool) { diff --git a/middlewares/auth/forward_test.go b/middlewares/auth/forward_test.go index a0364a030..5512434f1 100644 --- a/middlewares/auth/forward_test.go +++ b/middlewares/auth/forward_test.go @@ -1,50 +1,49 @@ package auth import ( + "context" "fmt" "io/ioutil" "net/http" "net/http/httptest" "testing" - "github.com/containous/traefik/middlewares/tracing" + "github.com/containous/traefik/config" "github.com/containous/traefik/testhelpers" - "github.com/containous/traefik/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/urfave/negroni" "github.com/vulcand/oxy/forward" ) func TestForwardAuthFail(t *testing.T) { + next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, "traefik") + }) + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { http.Error(w, "Forbidden", http.StatusForbidden) })) defer server.Close() - middleware, err := NewAuthenticator(&types.Auth{ - Forward: &types.Forward{ - Address: server.URL, - }, - }, &tracing.Tracing{}) - assert.NoError(t, err, "there should be no error") + middleware, err := NewForward(context.Background(), next, config.ForwardAuth{ + Address: server.URL, + }, "authTest") + require.NoError(t, err) - handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintln(w, "traefik") - }) - n := negroni.New(middleware) - n.UseHandler(handler) - ts := httptest.NewServer(n) + ts := httptest.NewServer(middleware) defer ts.Close() req := testhelpers.MustNewRequest(http.MethodGet, ts.URL, nil) res, err := http.DefaultClient.Do(req) - assert.NoError(t, err, "there should be no error") - assert.Equal(t, http.StatusForbidden, res.StatusCode, "they should be equal") + require.NoError(t, err) + assert.Equal(t, http.StatusForbidden, res.StatusCode) body, err := ioutil.ReadAll(res.Body) - assert.NoError(t, err, "there should be no error") - assert.Equal(t, "Forbidden\n", string(body), "they should be equal") + require.NoError(t, err) + err = res.Body.Close() + require.NoError(t, err) + + assert.Equal(t, "Forbidden\n", string(body)) } func TestForwardAuthSuccess(t *testing.T) { @@ -55,32 +54,32 @@ func TestForwardAuthSuccess(t *testing.T) { })) defer server.Close() - middleware, err := NewAuthenticator(&types.Auth{ - Forward: &types.Forward{ - Address: server.URL, - AuthResponseHeaders: []string{"X-Auth-User"}, - }, - }, &tracing.Tracing{}) - assert.NoError(t, err, "there should be no error") - - handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + next := http.HandlerFunc(func(w 
http.ResponseWriter, r *http.Request) { assert.Equal(t, "user@example.com", r.Header.Get("X-Auth-User")) assert.Empty(t, r.Header.Get("X-Auth-Secret")) fmt.Fprintln(w, "traefik") }) - n := negroni.New(middleware) - n.UseHandler(handler) - ts := httptest.NewServer(n) + + auth := config.ForwardAuth{ + Address: server.URL, + AuthResponseHeaders: []string{"X-Auth-User"}, + } + middleware, err := NewForward(context.Background(), next, auth, "authTest") + require.NoError(t, err) + + ts := httptest.NewServer(middleware) defer ts.Close() req := testhelpers.MustNewRequest(http.MethodGet, ts.URL, nil) res, err := http.DefaultClient.Do(req) - assert.NoError(t, err, "there should be no error") - assert.Equal(t, http.StatusOK, res.StatusCode, "they should be equal") + require.NoError(t, err) + assert.Equal(t, http.StatusOK, res.StatusCode) body, err := ioutil.ReadAll(res.Body) - assert.NoError(t, err, "there should be no error") - assert.Equal(t, "traefik\n", string(body), "they should be equal") + require.NoError(t, err) + err = res.Body.Close() + require.NoError(t, err) + assert.Equal(t, "traefik\n", string(body)) } func TestForwardAuthRedirect(t *testing.T) { @@ -89,19 +88,17 @@ func TestForwardAuthRedirect(t *testing.T) { })) defer authTs.Close() - authMiddleware, err := NewAuthenticator(&types.Auth{ - Forward: &types.Forward{ - Address: authTs.URL, - }, - }, &tracing.Tracing{}) - assert.NoError(t, err, "there should be no error") - - handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { fmt.Fprintln(w, "traefik") }) - n := negroni.New(authMiddleware) - n.UseHandler(handler) - ts := httptest.NewServer(n) + + auth := config.ForwardAuth{ + Address: authTs.URL, + } + authMiddleware, err := NewForward(context.Background(), next, auth, "authTest") + require.NoError(t, err) + + ts := httptest.NewServer(authMiddleware) defer ts.Close() client := &http.Client{ @@ -109,18 +106,23 @@ func TestForwardAuthRedirect(t *testing.T) { return http.ErrUseLastResponse }, } + req := testhelpers.MustNewRequest(http.MethodGet, ts.URL, nil) + res, err := client.Do(req) - assert.NoError(t, err, "there should be no error") - assert.Equal(t, http.StatusFound, res.StatusCode, "they should be equal") + require.NoError(t, err) + + assert.Equal(t, http.StatusFound, res.StatusCode) location, err := res.Location() - assert.NoError(t, err, "there should be no error") - assert.Equal(t, "http://example.com/redirect-test", location.String(), "they should be equal") + require.NoError(t, err) + assert.Equal(t, "http://example.com/redirect-test", location.String()) body, err := ioutil.ReadAll(res.Body) - assert.NoError(t, err, "there should be no error") - assert.NotEmpty(t, string(body), "there should be something in the body") + require.NoError(t, err) + err = res.Body.Close() + require.NoError(t, err) + assert.NotEmpty(t, string(body)) } func TestForwardAuthRemoveHopByHopHeaders(t *testing.T) { @@ -138,19 +140,17 @@ func TestForwardAuthRemoveHopByHopHeaders(t *testing.T) { })) defer authTs.Close() - authMiddleware, err := NewAuthenticator(&types.Auth{ - Forward: &types.Forward{ - Address: authTs.URL, - }, - }, &tracing.Tracing{}) - assert.NoError(t, err, "there should be no error") - - handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { fmt.Fprintln(w, "traefik") }) - n := negroni.New(authMiddleware) - n.UseHandler(handler) - ts := 
httptest.NewServer(n) + auth := config.ForwardAuth{ + Address: authTs.URL, + } + authMiddleware, err := NewForward(context.Background(), next, auth, "authTest") + + assert.NoError(t, err, "there should be no error") + + ts := httptest.NewServer(authMiddleware) defer ts.Close() client := &http.Client{ @@ -185,30 +185,28 @@ func TestForwardAuthFailResponseHeaders(t *testing.T) { })) defer authTs.Close() - authMiddleware, err := NewAuthenticator(&types.Auth{ - Forward: &types.Forward{ - Address: authTs.URL, - }, - }, &tracing.Tracing{}) - assert.NoError(t, err, "there should be no error") - - handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { fmt.Fprintln(w, "traefik") }) - n := negroni.New(authMiddleware) - n.UseHandler(handler) - ts := httptest.NewServer(n) + + auth := config.ForwardAuth{ + Address: authTs.URL, + } + authMiddleware, err := NewForward(context.Background(), next, auth, "authTest") + require.NoError(t, err) + + ts := httptest.NewServer(authMiddleware) defer ts.Close() req := testhelpers.MustNewRequest(http.MethodGet, ts.URL, nil) - client := &http.Client{} - res, err := client.Do(req) - assert.NoError(t, err, "there should be no error") - assert.Equal(t, http.StatusForbidden, res.StatusCode, "they should be equal") + + res, err := http.DefaultClient.Do(req) + require.NoError(t, err) + assert.Equal(t, http.StatusForbidden, res.StatusCode) require.Len(t, res.Cookies(), 1) for _, cookie := range res.Cookies() { - assert.Equal(t, "testing", cookie.Value, "they should be equal") + assert.Equal(t, "testing", cookie.Value) } expectedHeaders := http.Header{ @@ -225,8 +223,11 @@ func TestForwardAuthFailResponseHeaders(t *testing.T) { } body, err := ioutil.ReadAll(res.Body) - assert.NoError(t, err, "there should be no error") - assert.Equal(t, "Forbidden\n", string(body), "they should be equal") + require.NoError(t, err) + err = res.Body.Close() + require.NoError(t, err) + + assert.Equal(t, "Forbidden\n", string(body)) } func Test_writeHeader(t *testing.T) { diff --git a/middlewares/buffering/buffering.go b/middlewares/buffering/buffering.go new file mode 100644 index 000000000..39f0343f7 --- /dev/null +++ b/middlewares/buffering/buffering.go @@ -0,0 +1,54 @@ +package buffering + +import ( + "context" + "net/http" + + "github.com/containous/traefik/config" + "github.com/containous/traefik/middlewares" + "github.com/containous/traefik/tracing" + "github.com/opentracing/opentracing-go/ext" + oxybuffer "github.com/vulcand/oxy/buffer" +) + +const ( + typeName = "Buffer" +) + +type buffer struct { + name string + buffer *oxybuffer.Buffer +} + +// New creates a buffering middleware. 
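+// A rough usage sketch (the limits and retry expression are illustrative values,
+// not defaults; next and the middleware name are placeholders):
+//
+//	cfg := config.Buffering{
+//		MaxRequestBodyBytes:  2 * 1024 * 1024,
+//		MemRequestBodyBytes:  1024 * 1024,
+//		MaxResponseBodyBytes: 2 * 1024 * 1024,
+//		MemResponseBodyBytes: 1024 * 1024,
+//		RetryExpression:      "IsNetworkError() && Attempts() <= 2",
+//	}
+//	handler, err := New(context.Background(), next, cfg, "my-buffer")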
+func New(ctx context.Context, next http.Handler, config config.Buffering, name string) (http.Handler, error) {
+	logger := middlewares.GetLogger(ctx, name, typeName)
+	logger.Debug("Creating middleware")
+	logger.Debugf("Setting up buffering: request limits: %d (mem), %d (max), response limits: %d (mem), %d (max) with retry: '%s'",
+		config.MemRequestBodyBytes, config.MaxRequestBodyBytes, config.MemResponseBodyBytes, config.MaxResponseBodyBytes, config.RetryExpression)
+
+	oxyBuffer, err := oxybuffer.New(
+		next,
+		oxybuffer.MemRequestBodyBytes(config.MemRequestBodyBytes),
+		oxybuffer.MaxRequestBodyBytes(config.MaxRequestBodyBytes),
+		oxybuffer.MemResponseBodyBytes(config.MemResponseBodyBytes),
+		oxybuffer.MaxResponseBodyBytes(config.MaxResponseBodyBytes),
+		oxybuffer.CondSetter(len(config.RetryExpression) > 0, oxybuffer.Retry(config.RetryExpression)),
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	return &buffer{
+		name:   name,
+		buffer: oxyBuffer,
+	}, nil
+}
+
+func (b *buffer) GetTracingInformation() (string, ext.SpanKindEnum) {
+	return b.name, tracing.SpanKindNoneEnum
+}
+
+func (b *buffer) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
+	b.buffer.ServeHTTP(rw, req)
+}
diff --git a/middlewares/chain/chain.go b/middlewares/chain/chain.go
new file mode 100644
index 000000000..3ba4208a0
--- /dev/null
+++ b/middlewares/chain/chain.go
@@ -0,0 +1,30 @@
+package chain
+
+import (
+	"context"
+	"net/http"
+
+	"github.com/containous/alice"
+	"github.com/containous/traefik/config"
+	"github.com/containous/traefik/middlewares"
+)
+
+const (
+	typeName = "Chain"
+)
+
+type chainBuilder interface {
+	BuildChain(ctx context.Context, middlewares []string) (*alice.Chain, error)
+}
+
+// New creates a chain middleware.
+func New(ctx context.Context, next http.Handler, config config.Chain, builder chainBuilder, name string) (http.Handler, error) {
+	middlewares.GetLogger(ctx, name, typeName).Debug("Creating middleware")
+
+	middlewareChain, err := builder.BuildChain(ctx, config.Middlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	return middlewareChain.Then(next)
+}
diff --git a/middlewares/circuitbreaker/circuit_breaker.go b/middlewares/circuitbreaker/circuit_breaker.go
new file mode 100644
index 000000000..625ffae30
--- /dev/null
+++ b/middlewares/circuitbreaker/circuit_breaker.go
@@ -0,0 +1,61 @@
+package circuitbreaker
+
+import (
+	"context"
+	"net/http"
+
+	"github.com/containous/traefik/config"
+	"github.com/containous/traefik/log"
+	"github.com/containous/traefik/middlewares"
+	"github.com/containous/traefik/tracing"
+	"github.com/opentracing/opentracing-go/ext"
+	"github.com/vulcand/oxy/cbreaker"
+)
+
+const (
+	typeName = "CircuitBreaker"
+)
+
+type circuitBreaker struct {
+	circuitBreaker *cbreaker.CircuitBreaker
+	name           string
+}
+
+// New creates a new circuit breaker middleware.
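+// A rough usage sketch (the expression is one example of the oxy/cbreaker
+// expression language; next and the middleware name are placeholders):
+//
+//	cb, err := New(ctx, next, config.CircuitBreaker{Expression: "NetworkErrorRatio() > 0.5"}, "my-circuit-breaker")
+//	if err != nil {
+//		// an unparsable expression is reported here
+//	}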
+func New(ctx context.Context, next http.Handler, confCircuitBreaker config.CircuitBreaker, name string) (http.Handler, error) {
+	expression := confCircuitBreaker.Expression
+
+	logger := middlewares.GetLogger(ctx, name, typeName)
+	logger.Debug("Creating middleware")
+	logger.Debugf("Setting up with expression: %s", expression)
+
+	oxyCircuitBreaker, err := cbreaker.New(next, expression, createCircuitBreakerOptions(expression))
+	if err != nil {
+		return nil, err
+	}
+	return &circuitBreaker{
+		circuitBreaker: oxyCircuitBreaker,
+		name:           name,
+	}, nil
+}
+
+// createCircuitBreakerOptions returns a new CircuitBreakerOption
+func createCircuitBreakerOptions(expression string) cbreaker.CircuitBreakerOption {
+	return cbreaker.Fallback(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
+		tracing.SetErrorWithEvent(req, "blocked by circuit-breaker (%q)", expression)
+		rw.WriteHeader(http.StatusServiceUnavailable)
+
+		if _, err := rw.Write([]byte(http.StatusText(http.StatusServiceUnavailable))); err != nil {
+			log.FromContext(req.Context()).Error(err)
+		}
+	}))
+}
+
+func (c *circuitBreaker) GetTracingInformation() (string, ext.SpanKindEnum) {
+	return c.name, tracing.SpanKindNoneEnum
+}
+
+func (c *circuitBreaker) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
+	middlewares.GetLogger(req.Context(), c.name, typeName).Debug("Entering middleware")
+	c.circuitBreaker.ServeHTTP(rw, req)
+}
diff --git a/middlewares/compress/compress.go b/middlewares/compress/compress.go
new file mode 100644
index 000000000..6b89092d1
--- /dev/null
+++ b/middlewares/compress/compress.go
@@ -0,0 +1,58 @@
+package compress
+
+import (
+	"compress/gzip"
+	"context"
+	"net/http"
+	"strings"
+
+	"github.com/NYTimes/gziphandler"
+	"github.com/containous/traefik/middlewares"
+	"github.com/containous/traefik/tracing"
+	"github.com/opentracing/opentracing-go/ext"
+	"github.com/sirupsen/logrus"
+)
+
+const (
+	typeName = "Compress"
+)
+
+// compress is a middleware that compresses the response.
+type compress struct {
+	next http.Handler
+	name string
+}
+
+// New creates a new compress middleware.
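+// A minimal usage sketch (next and the middleware name are placeholders):
+//
+//	handler, err := New(context.Background(), next, "my-compress")
+//	if err != nil {
+//		// handle the error
+//	}
+//	// gRPC responses (Content-Type "application/grpc") are passed through uncompressed.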
+func New(ctx context.Context, next http.Handler, name string) (http.Handler, error) { + middlewares.GetLogger(ctx, name, typeName).Debug("Creating middleware") + + return &compress{ + next: next, + name: name, + }, nil +} + +func (c *compress) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + contentType := req.Header.Get("Content-Type") + if strings.HasPrefix(contentType, "application/grpc") { + c.next.ServeHTTP(rw, req) + } else { + gzipHandler(c.next, middlewares.GetLogger(req.Context(), c.name, typeName)).ServeHTTP(rw, req) + } +} + +func (c *compress) GetTracingInformation() (string, ext.SpanKindEnum) { + return c.name, tracing.SpanKindNoneEnum +} + +func gzipHandler(h http.Handler, logger logrus.FieldLogger) http.Handler { + wrapper, err := gziphandler.GzipHandlerWithOpts( + gziphandler.CompressionLevel(gzip.DefaultCompression), + gziphandler.MinSize(gziphandler.DefaultMinSize)) + if err != nil { + logger.Error(err) + } + + return wrapper(h) +} diff --git a/middlewares/compress/compress_test.go b/middlewares/compress/compress_test.go new file mode 100644 index 000000000..dc94b4227 --- /dev/null +++ b/middlewares/compress/compress_test.go @@ -0,0 +1,260 @@ +package compress + +import ( + "io/ioutil" + "net/http" + "net/http/httptest" + "testing" + + "github.com/NYTimes/gziphandler" + "github.com/containous/traefik/testhelpers" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + acceptEncodingHeader = "Accept-Encoding" + contentEncodingHeader = "Content-Encoding" + contentTypeHeader = "Content-Type" + varyHeader = "Vary" + gzipValue = "gzip" +) + +func TestShouldCompressWhenNoContentEncodingHeader(t *testing.T) { + req := testhelpers.MustNewRequest(http.MethodGet, "http://localhost", nil) + req.Header.Add(acceptEncodingHeader, gzipValue) + + baseBody := generateBytes(gziphandler.DefaultMinSize) + + next := http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + _, err := rw.Write(baseBody) + assert.NoError(t, err) + }) + handler := &compress{next: next} + + rw := httptest.NewRecorder() + handler.ServeHTTP(rw, req) + + assert.Equal(t, gzipValue, rw.Header().Get(contentEncodingHeader)) + assert.Equal(t, acceptEncodingHeader, rw.Header().Get(varyHeader)) + + if assert.ObjectsAreEqualValues(rw.Body.Bytes(), baseBody) { + assert.Fail(t, "expected a compressed body", "got %v", rw.Body.Bytes()) + } +} + +func TestShouldNotCompressWhenContentEncodingHeader(t *testing.T) { + req := testhelpers.MustNewRequest(http.MethodGet, "http://localhost", nil) + req.Header.Add(acceptEncodingHeader, gzipValue) + + fakeCompressedBody := generateBytes(gziphandler.DefaultMinSize) + next := http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + rw.Header().Add(contentEncodingHeader, gzipValue) + rw.Header().Add(varyHeader, acceptEncodingHeader) + _, err := rw.Write(fakeCompressedBody) + if err != nil { + http.Error(rw, err.Error(), http.StatusInternalServerError) + } + }) + handler := &compress{next: next} + + rw := httptest.NewRecorder() + handler.ServeHTTP(rw, req) + + assert.Equal(t, gzipValue, rw.Header().Get(contentEncodingHeader)) + assert.Equal(t, acceptEncodingHeader, rw.Header().Get(varyHeader)) + + assert.EqualValues(t, rw.Body.Bytes(), fakeCompressedBody) +} + +func TestShouldNotCompressWhenNoAcceptEncodingHeader(t *testing.T) { + req := testhelpers.MustNewRequest(http.MethodGet, "http://localhost", nil) + + fakeBody := generateBytes(gziphandler.DefaultMinSize) + next := http.HandlerFunc(func(rw http.ResponseWriter, r 
*http.Request) { + _, err := rw.Write(fakeBody) + if err != nil { + http.Error(rw, err.Error(), http.StatusInternalServerError) + } + }) + handler := &compress{next: next} + + rw := httptest.NewRecorder() + handler.ServeHTTP(rw, req) + + assert.Empty(t, rw.Header().Get(contentEncodingHeader)) + assert.EqualValues(t, rw.Body.Bytes(), fakeBody) +} + +func TestShouldNotCompressWhenGRPC(t *testing.T) { + req := testhelpers.MustNewRequest(http.MethodGet, "http://localhost", nil) + req.Header.Add(acceptEncodingHeader, gzipValue) + req.Header.Add(contentTypeHeader, "application/grpc") + + baseBody := generateBytes(gziphandler.DefaultMinSize) + next := http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + _, err := rw.Write(baseBody) + if err != nil { + http.Error(rw, err.Error(), http.StatusInternalServerError) + } + }) + handler := &compress{next: next} + + rw := httptest.NewRecorder() + handler.ServeHTTP(rw, req) + + assert.Empty(t, rw.Header().Get(acceptEncodingHeader)) + assert.Empty(t, rw.Header().Get(contentEncodingHeader)) + assert.EqualValues(t, rw.Body.Bytes(), baseBody) +} + +func TestIntegrationShouldNotCompress(t *testing.T) { + fakeCompressedBody := generateBytes(100000) + + testCases := []struct { + name string + handler http.Handler + expectedStatusCode int + }{ + { + name: "when content already compressed", + handler: http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + rw.Header().Add(contentEncodingHeader, gzipValue) + rw.Header().Add(varyHeader, acceptEncodingHeader) + _, err := rw.Write(fakeCompressedBody) + if err != nil { + http.Error(rw, err.Error(), http.StatusInternalServerError) + } + }), + expectedStatusCode: http.StatusOK, + }, + { + name: "when content already compressed and status code Created", + handler: http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + rw.Header().Add(contentEncodingHeader, gzipValue) + rw.Header().Add(varyHeader, acceptEncodingHeader) + rw.WriteHeader(http.StatusCreated) + _, err := rw.Write(fakeCompressedBody) + if err != nil { + http.Error(rw, err.Error(), http.StatusInternalServerError) + } + }), + expectedStatusCode: http.StatusCreated, + }, + } + + for _, test := range testCases { + t.Run(test.name, func(t *testing.T) { + compress := &compress{next: test.handler} + ts := httptest.NewServer(compress) + defer ts.Close() + + req := testhelpers.MustNewRequest(http.MethodGet, ts.URL, nil) + req.Header.Add(acceptEncodingHeader, gzipValue) + + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + + assert.Equal(t, test.expectedStatusCode, resp.StatusCode) + + assert.Equal(t, gzipValue, resp.Header.Get(contentEncodingHeader)) + assert.Equal(t, acceptEncodingHeader, resp.Header.Get(varyHeader)) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + assert.EqualValues(t, fakeCompressedBody, body) + }) + } +} + +func TestShouldWriteHeaderWhenFlush(t *testing.T) { + next := http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + rw.Header().Add(contentEncodingHeader, gzipValue) + rw.Header().Add(varyHeader, acceptEncodingHeader) + rw.WriteHeader(http.StatusUnauthorized) + rw.(http.Flusher).Flush() + _, err := rw.Write([]byte("short")) + if err != nil { + http.Error(rw, err.Error(), http.StatusInternalServerError) + } + }) + handler := &compress{next: next} + ts := httptest.NewServer(handler) + defer ts.Close() + + req := testhelpers.MustNewRequest(http.MethodGet, ts.URL, nil) + req.Header.Add(acceptEncodingHeader, gzipValue) + + resp, err := http.DefaultClient.Do(req) + 
require.NoError(t, err) + + assert.Equal(t, http.StatusUnauthorized, resp.StatusCode) + + assert.Equal(t, gzipValue, resp.Header.Get(contentEncodingHeader)) + assert.Equal(t, acceptEncodingHeader, resp.Header.Get(varyHeader)) +} + +func TestIntegrationShouldCompress(t *testing.T) { + fakeBody := generateBytes(100000) + + testCases := []struct { + name string + handler http.Handler + expectedStatusCode int + }{ + { + name: "when AcceptEncoding header is present", + handler: http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + _, err := rw.Write(fakeBody) + if err != nil { + http.Error(rw, err.Error(), http.StatusInternalServerError) + } + }), + expectedStatusCode: http.StatusOK, + }, + { + name: "when AcceptEncoding header is present and status code Created", + handler: http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + rw.WriteHeader(http.StatusCreated) + _, err := rw.Write(fakeBody) + if err != nil { + http.Error(rw, err.Error(), http.StatusInternalServerError) + } + }), + expectedStatusCode: http.StatusCreated, + }, + } + + for _, test := range testCases { + t.Run(test.name, func(t *testing.T) { + compress := &compress{next: test.handler} + ts := httptest.NewServer(compress) + defer ts.Close() + + req := testhelpers.MustNewRequest(http.MethodGet, ts.URL, nil) + req.Header.Add(acceptEncodingHeader, gzipValue) + + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + + assert.Equal(t, test.expectedStatusCode, resp.StatusCode) + + assert.Equal(t, gzipValue, resp.Header.Get(contentEncodingHeader)) + assert.Equal(t, acceptEncodingHeader, resp.Header.Get(varyHeader)) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + if assert.ObjectsAreEqualValues(body, fakeBody) { + assert.Fail(t, "expected a compressed body", "got %v", body) + } + }) + } +} + +func generateBytes(len int) []byte { + var value []byte + for i := 0; i < len; i++ { + value = append(value, 0x61+byte(i)) + } + return value +} diff --git a/middlewares/customerrors/custom_errors.go b/middlewares/customerrors/custom_errors.go new file mode 100644 index 000000000..796865c78 --- /dev/null +++ b/middlewares/customerrors/custom_errors.go @@ -0,0 +1,248 @@ +package customerrors + +import ( + "bufio" + "bytes" + "context" + "fmt" + "net" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/containous/traefik/config" + "github.com/containous/traefik/middlewares" + "github.com/containous/traefik/old/types" + "github.com/containous/traefik/tracing" + "github.com/opentracing/opentracing-go/ext" + "github.com/sirupsen/logrus" + "github.com/vulcand/oxy/utils" +) + +// Compile time validation that the response recorder implements http interfaces correctly. +var _ middlewares.Stateful = &responseRecorderWithCloseNotify{} + +const ( + typeName = "customError" + backendURL = "http://0.0.0.0" +) + +type serviceBuilder interface { + Build(ctx context.Context, serviceName string, responseModifier func(*http.Response) error) (http.Handler, error) +} + +// customErrors is a middleware that provides the custom error pages.. +type customErrors struct { + name string + next http.Handler + backendHandler http.Handler + httpCodeRanges types.HTTPCodeRanges + backendQuery string +} + +// New creates a new custom error pages middleware. 
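+// A rough wiring sketch (illustrative only: builder is any value satisfying the
+// serviceBuilder interface above; the service name, query, and status range are
+// example values; ctx and next are placeholders):
+//
+//	cfg := config.ErrorPage{Service: "error-handler", Query: "/{status}", Status: []string{"500-599"}}
+//	handler, err := New(ctx, next, cfg, builder, "my-error-pages")
+//	// "{status}" in the query is replaced by the intercepted status code.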
+func New(ctx context.Context, next http.Handler, config config.ErrorPage, serviceBuilder serviceBuilder, name string) (http.Handler, error) { + middlewares.GetLogger(ctx, name, typeName).Debug("Creating middleware") + + httpCodeRanges, err := types.NewHTTPCodeRanges(config.Status) + if err != nil { + return nil, err + } + + backend, err := serviceBuilder.Build(ctx, config.Service, nil) + if err != nil { + return nil, err + } + + return &customErrors{ + name: name, + next: next, + backendHandler: backend, + httpCodeRanges: httpCodeRanges, + backendQuery: config.Query, + }, nil +} + +func (c *customErrors) GetTracingInformation() (string, ext.SpanKindEnum) { + return c.name, tracing.SpanKindNoneEnum +} + +func (c *customErrors) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + logger := middlewares.GetLogger(req.Context(), c.name, typeName) + + if c.backendHandler == nil { + logger.Error("Error pages: no backend handler.") + tracing.SetErrorWithEvent(req, "Error pages: no backend handler.") + c.next.ServeHTTP(rw, req) + return + } + + recorder := newResponseRecorder(rw, middlewares.GetLogger(context.Background(), "test", typeName)) + c.next.ServeHTTP(recorder, req) + + // check the recorder code against the configured http status code ranges + for _, block := range c.httpCodeRanges { + if recorder.GetCode() >= block[0] && recorder.GetCode() <= block[1] { + logger.Errorf("Caught HTTP Status Code %d, returning error page", recorder.GetCode()) + + var query string + if len(c.backendQuery) > 0 { + query = "/" + strings.TrimPrefix(c.backendQuery, "/") + query = strings.Replace(query, "{status}", strconv.Itoa(recorder.GetCode()), -1) + } + + pageReq, err := newRequest(backendURL + query) + if err != nil { + logger.Error(err) + rw.WriteHeader(recorder.GetCode()) + _, err = fmt.Fprint(rw, http.StatusText(recorder.GetCode())) + if err != nil { + http.Error(rw, err.Error(), http.StatusInternalServerError) + } + return + } + + recorderErrorPage := newResponseRecorder(rw, middlewares.GetLogger(context.Background(), "test", typeName)) + utils.CopyHeaders(pageReq.Header, req.Header) + + c.backendHandler.ServeHTTP(recorderErrorPage, pageReq.WithContext(req.Context())) + + utils.CopyHeaders(rw.Header(), recorderErrorPage.Header()) + rw.WriteHeader(recorder.GetCode()) + + if _, err = rw.Write(recorderErrorPage.GetBody().Bytes()); err != nil { + logger.Error(err) + } + return + } + } + + // did not catch a configured status code so proceed with the request + utils.CopyHeaders(rw.Header(), recorder.Header()) + rw.WriteHeader(recorder.GetCode()) + _, err := rw.Write(recorder.GetBody().Bytes()) + if err != nil { + http.Error(rw, err.Error(), http.StatusInternalServerError) + } +} + +func newRequest(baseURL string) (*http.Request, error) { + u, err := url.Parse(baseURL) + if err != nil { + return nil, fmt.Errorf("error pages: error when parse URL: %v", err) + } + + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return nil, fmt.Errorf("error pages: error when create query: %v", err) + } + + req.RequestURI = u.RequestURI() + return req, nil +} + +type responseRecorder interface { + http.ResponseWriter + http.Flusher + GetCode() int + GetBody() *bytes.Buffer + IsStreamingResponseStarted() bool +} + +// newResponseRecorder returns an initialized responseRecorder. 
+func newResponseRecorder(rw http.ResponseWriter, logger logrus.FieldLogger) responseRecorder { + recorder := &responseRecorderWithoutCloseNotify{ + HeaderMap: make(http.Header), + Body: new(bytes.Buffer), + Code: http.StatusOK, + responseWriter: rw, + logger: logger, + } + if _, ok := rw.(http.CloseNotifier); ok { + return &responseRecorderWithCloseNotify{recorder} + } + return recorder +} + +// responseRecorderWithoutCloseNotify is an implementation of http.ResponseWriter that +// records its mutations for later inspection. +type responseRecorderWithoutCloseNotify struct { + Code int // the HTTP response code from WriteHeader + HeaderMap http.Header // the HTTP response headers + Body *bytes.Buffer // if non-nil, the bytes.Buffer to append written data to + + responseWriter http.ResponseWriter + err error + streamingResponseStarted bool + logger logrus.FieldLogger +} + +type responseRecorderWithCloseNotify struct { + *responseRecorderWithoutCloseNotify +} + +// CloseNotify returns a channel that receives at most a +// single value (true) when the client connection has gone away. +func (r *responseRecorderWithCloseNotify) CloseNotify() <-chan bool { + return r.responseWriter.(http.CloseNotifier).CloseNotify() +} + +// Header returns the response headers. +func (r *responseRecorderWithoutCloseNotify) Header() http.Header { + if r.HeaderMap == nil { + r.HeaderMap = make(http.Header) + } + + return r.HeaderMap +} + +func (r *responseRecorderWithoutCloseNotify) GetCode() int { + return r.Code +} + +func (r *responseRecorderWithoutCloseNotify) GetBody() *bytes.Buffer { + return r.Body +} + +func (r *responseRecorderWithoutCloseNotify) IsStreamingResponseStarted() bool { + return r.streamingResponseStarted +} + +// Write always succeeds and writes to rw.Body, if not nil. +func (r *responseRecorderWithoutCloseNotify) Write(buf []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + return r.Body.Write(buf) +} + +// WriteHeader sets rw.Code. +func (r *responseRecorderWithoutCloseNotify) WriteHeader(code int) { + r.Code = code +} + +// Hijack hijacks the connection +func (r *responseRecorderWithoutCloseNotify) Hijack() (net.Conn, *bufio.ReadWriter, error) { + return r.responseWriter.(http.Hijacker).Hijack() +} + +// Flush sends any buffered data to the client. 
+func (r *responseRecorderWithoutCloseNotify) Flush() { + if !r.streamingResponseStarted { + utils.CopyHeaders(r.responseWriter.Header(), r.Header()) + r.responseWriter.WriteHeader(r.Code) + r.streamingResponseStarted = true + } + + _, err := r.responseWriter.Write(r.Body.Bytes()) + if err != nil { + r.logger.Errorf("Error writing response in responseRecorder: %v", err) + r.err = err + } + r.Body.Reset() + + if flusher, ok := r.responseWriter.(http.Flusher); ok { + flusher.Flush() + } +} diff --git a/middlewares/customerrors/custom_errors_test.go b/middlewares/customerrors/custom_errors_test.go new file mode 100644 index 000000000..3c9198f13 --- /dev/null +++ b/middlewares/customerrors/custom_errors_test.go @@ -0,0 +1,176 @@ +package customerrors + +import ( + "context" + "fmt" + "net/http" + "net/http/httptest" + "testing" + + "github.com/containous/traefik/config" + "github.com/containous/traefik/middlewares" + "github.com/containous/traefik/testhelpers" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestHandler(t *testing.T) { + testCases := []struct { + desc string + errorPage *config.ErrorPage + backendCode int + backendErrorHandler http.HandlerFunc + validate func(t *testing.T, recorder *httptest.ResponseRecorder) + }{ + { + desc: "no error", + errorPage: &config.ErrorPage{Service: "error", Query: "/test", Status: []string{"500-501", "503-599"}}, + backendCode: http.StatusOK, + backendErrorHandler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, "My error page.") + }), + validate: func(t *testing.T, recorder *httptest.ResponseRecorder) { + assert.Equal(t, http.StatusOK, recorder.Code, "HTTP status") + assert.Contains(t, recorder.Body.String(), http.StatusText(http.StatusOK)) + }, + }, + { + desc: "in the range", + errorPage: &config.ErrorPage{Service: "error", Query: "/test", Status: []string{"500-501", "503-599"}}, + backendCode: http.StatusInternalServerError, + backendErrorHandler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, "My error page.") + }), + validate: func(t *testing.T, recorder *httptest.ResponseRecorder) { + assert.Equal(t, http.StatusInternalServerError, recorder.Code, "HTTP status") + assert.Contains(t, recorder.Body.String(), "My error page.") + assert.NotContains(t, recorder.Body.String(), "oops", "Should not return the oops page") + }, + }, + { + desc: "not in the range", + errorPage: &config.ErrorPage{Service: "error", Query: "/test", Status: []string{"500-501", "503-599"}}, + backendCode: http.StatusBadGateway, + backendErrorHandler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, "My error page.") + }), + validate: func(t *testing.T, recorder *httptest.ResponseRecorder) { + assert.Equal(t, http.StatusBadGateway, recorder.Code, "HTTP status") + assert.Contains(t, recorder.Body.String(), http.StatusText(http.StatusBadGateway)) + assert.NotContains(t, recorder.Body.String(), "Test Server", "Should return the oops page since we have not configured the 502 code") + }, + }, + { + desc: "query replacement", + errorPage: &config.ErrorPage{Service: "error", Query: "/{status}", Status: []string{"503-503"}}, + backendCode: http.StatusServiceUnavailable, + backendErrorHandler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.RequestURI == "/503" { + fmt.Fprintln(w, "My 503 page.") + } else { + fmt.Fprintln(w, "Failed") + } + }), + validate: func(t *testing.T, recorder *httptest.ResponseRecorder) { + 
assert.Equal(t, http.StatusServiceUnavailable, recorder.Code, "HTTP status") + assert.Contains(t, recorder.Body.String(), "My 503 page.") + assert.NotContains(t, recorder.Body.String(), "oops", "Should not return the oops page") + }, + }, + { + desc: "Single code", + errorPage: &config.ErrorPage{Service: "error", Query: "/{status}", Status: []string{"503"}}, + backendCode: http.StatusServiceUnavailable, + backendErrorHandler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.RequestURI == "/503" { + fmt.Fprintln(w, "My 503 page.") + } else { + fmt.Fprintln(w, "Failed") + } + }), + validate: func(t *testing.T, recorder *httptest.ResponseRecorder) { + assert.Equal(t, http.StatusServiceUnavailable, recorder.Code, "HTTP status") + assert.Contains(t, recorder.Body.String(), "My 503 page.") + assert.NotContains(t, recorder.Body.String(), "oops", "Should not return the oops page") + }, + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + serviceBuilderMock := &mockServiceBuilder{handler: test.backendErrorHandler} + + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(test.backendCode) + fmt.Fprintln(w, http.StatusText(test.backendCode)) + }) + errorPageHandler, err := New(context.Background(), handler, *test.errorPage, serviceBuilderMock, "test") + require.NoError(t, err) + + req := testhelpers.MustNewRequest(http.MethodGet, "http://localhost/test", nil) + + recorder := httptest.NewRecorder() + errorPageHandler.ServeHTTP(recorder, req) + + test.validate(t, recorder) + }) + } +} + +type mockServiceBuilder struct { + handler http.Handler +} + +func (m *mockServiceBuilder) Build(_ context.Context, serviceName string, responseModifier func(*http.Response) error) (http.Handler, error) { + return m.handler, nil +} + +func TestNewResponseRecorder(t *testing.T) { + testCases := []struct { + desc string + rw http.ResponseWriter + expected http.ResponseWriter + }{ + { + desc: "Without Close Notify", + rw: httptest.NewRecorder(), + expected: &responseRecorderWithoutCloseNotify{}, + }, + { + desc: "With Close Notify", + rw: &mockRWCloseNotify{}, + expected: &responseRecorderWithCloseNotify{}, + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + rec := newResponseRecorder(test.rw, middlewares.GetLogger(context.Background(), "test", typeName)) + assert.IsType(t, rec, test.expected) + }) + } +} + +type mockRWCloseNotify struct{} + +func (m *mockRWCloseNotify) CloseNotify() <-chan bool { + panic("implement me") +} + +func (m *mockRWCloseNotify) Header() http.Header { + panic("implement me") +} + +func (m *mockRWCloseNotify) Write([]byte) (int, error) { + panic("implement me") +} + +func (m *mockRWCloseNotify) WriteHeader(int) { + panic("implement me") +} diff --git a/middlewares/emptybackendhandler/empty_backend_handler.go b/middlewares/emptybackendhandler/empty_backend_handler.go new file mode 100644 index 000000000..36e3571c6 --- /dev/null +++ b/middlewares/emptybackendhandler/empty_backend_handler.go @@ -0,0 +1,33 @@ +package emptybackendhandler + +import ( + "net/http" + + "github.com/containous/traefik/healthcheck" +) + +// EmptyBackend is a middleware that checks whether the current Backend +// has at least one active Server in respect to the healthchecks and if this +// is not the case, it will stop the middleware chain and respond with 503. 
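+//
+// It wraps a healthcheck.BalancerHandler, so requests only reach the balancer
+// while it reports at least one server, e.g. (with lb being any
+// healthcheck.BalancerHandler):
+//
+//	handler := emptybackendhandler.New(lb)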
+type emptyBackend struct { + next healthcheck.BalancerHandler +} + +// New creates a new EmptyBackend middleware. +func New(lb healthcheck.BalancerHandler) http.Handler { + return &emptyBackend{next: lb} +} + +// ServeHTTP responds with 503 when there is no active Server and otherwise +// invokes the next handler in the middleware chain. +func (e *emptyBackend) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + if len(e.next.Servers()) == 0 { + rw.WriteHeader(http.StatusServiceUnavailable) + _, err := rw.Write([]byte(http.StatusText(http.StatusServiceUnavailable))) + if err != nil { + http.Error(rw, err.Error(), http.StatusInternalServerError) + } + } else { + e.next.ServeHTTP(rw, req) + } +} diff --git a/middlewares/emptybackendhandler/empty_backend_handler_test.go b/middlewares/emptybackendhandler/empty_backend_handler_test.go new file mode 100644 index 000000000..d7c18c99e --- /dev/null +++ b/middlewares/emptybackendhandler/empty_backend_handler_test.go @@ -0,0 +1,81 @@ +package emptybackendhandler + +import ( + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "testing" + + "github.com/containous/traefik/testhelpers" + "github.com/stretchr/testify/assert" + "github.com/vulcand/oxy/roundrobin" +) + +func TestEmptyBackendHandler(t *testing.T) { + testCases := []struct { + amountServer int + expectedStatusCode int + }{ + { + amountServer: 0, + expectedStatusCode: http.StatusServiceUnavailable, + }, + { + amountServer: 1, + expectedStatusCode: http.StatusOK, + }, + } + + for _, test := range testCases { + test := test + t.Run(fmt.Sprintf("amount servers %d", test.amountServer), func(t *testing.T) { + t.Parallel() + + handler := New(&healthCheckLoadBalancer{amountServer: test.amountServer}) + + recorder := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, "http://localhost", nil) + + handler.ServeHTTP(recorder, req) + + assert.Equal(t, test.expectedStatusCode, recorder.Result().StatusCode) + }) + } +} + +type healthCheckLoadBalancer struct { + amountServer int +} + +func (lb *healthCheckLoadBalancer) ServeHTTP(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) +} + +func (lb *healthCheckLoadBalancer) Servers() []*url.URL { + servers := make([]*url.URL, lb.amountServer) + for i := 0; i < lb.amountServer; i++ { + servers = append(servers, testhelpers.MustParseURL("http://localhost")) + } + return servers +} + +func (lb *healthCheckLoadBalancer) RemoveServer(u *url.URL) error { + return nil +} + +func (lb *healthCheckLoadBalancer) UpsertServer(u *url.URL, options ...roundrobin.ServerOption) error { + return nil +} + +func (lb *healthCheckLoadBalancer) ServerWeight(u *url.URL) (int, bool) { + return 0, false +} + +func (lb *healthCheckLoadBalancer) NextServer() (*url.URL, error) { + return nil, nil +} + +func (lb *healthCheckLoadBalancer) Next() http.Handler { + return nil +} diff --git a/middlewares/handler_switcher.go b/middlewares/handler_switcher.go new file mode 100644 index 000000000..bb5745b00 --- /dev/null +++ b/middlewares/handler_switcher.go @@ -0,0 +1,35 @@ +package middlewares + +import ( + "net/http" + + "github.com/containous/traefik/safe" +) + +// HandlerSwitcher allows hot switching of http.ServeMux +type HandlerSwitcher struct { + handler *safe.Safe +} + +// NewHandlerSwitcher builds a new instance of HandlerSwitcher +func NewHandlerSwitcher(newHandler http.Handler) (hs *HandlerSwitcher) { + return &HandlerSwitcher{ + handler: safe.New(newHandler), + } +} + +func (h *HandlerSwitcher) ServeHTTP(rw http.ResponseWriter, req 
*http.Request) {
+	handlerBackup := h.handler.Get().(http.Handler)
+	handlerBackup.ServeHTTP(rw, req)
+}
+
+// GetHandler returns the current http.Handler
+func (h *HandlerSwitcher) GetHandler() (newHandler http.Handler) {
+	handler := h.handler.Get().(http.Handler)
+	return handler
+}
+
+// UpdateHandler safely replaces the current http.Handler with a new one
+func (h *HandlerSwitcher) UpdateHandler(newHandler http.Handler) {
+	h.handler.Set(newHandler)
+}
diff --git a/middlewares/headers/headers.go b/middlewares/headers/headers.go
new file mode 100644
index 000000000..fadaeedee
--- /dev/null
+++ b/middlewares/headers/headers.go
@@ -0,0 +1,134 @@
+// Package headers implements middleware based on https://github.com/unrolled/secure.
+package headers
+
+import (
+	"context"
+	"errors"
+	"net/http"
+
+	"github.com/containous/traefik/config"
+	"github.com/containous/traefik/middlewares"
+	"github.com/containous/traefik/tracing"
+	"github.com/opentracing/opentracing-go/ext"
+	"github.com/unrolled/secure"
+)
+
+const (
+	typeName = "Headers"
+)
+
+type headers struct {
+	name    string
+	handler http.Handler
+}
+
+// New creates a Headers middleware.
+func New(ctx context.Context, next http.Handler, config config.Headers, name string) (http.Handler, error) {
+	// HeaderMiddleware -> SecureMiddleWare -> next
+	logger := middlewares.GetLogger(ctx, name, typeName)
+	logger.Debug("Creating middleware")
+
+	if !config.HasSecureHeadersDefined() && !config.HasCustomHeadersDefined() {
+		return nil, errors.New("headers configuration not valid")
+	}
+
+	var handler http.Handler
+	nextHandler := next
+
+	if config.HasSecureHeadersDefined() {
+		logger.Debugf("Setting up secureHeaders from %v", config)
+		handler = newSecure(next, config)
+		nextHandler = handler
+	}
+
+	if config.HasCustomHeadersDefined() {
+		logger.Debugf("Setting up customHeaders from %v", config)
+		handler = newHeader(nextHandler, config)
+	}
+
+	return &headers{
+		handler: handler,
+		name:    name,
+	}, nil
+}
+
+func (h *headers) GetTracingInformation() (string, ext.SpanKindEnum) {
+	return h.name, tracing.SpanKindNoneEnum
+}
+
+func (h *headers) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
+	h.handler.ServeHTTP(rw, req)
+}
+
+type secureHeader struct {
+	next   http.Handler
+	secure *secure.Secure
+}
+
+// newSecure constructs a new secure instance with supplied options.
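+// The config.Headers fields are mapped one-to-one onto secure.Options from
+// github.com/unrolled/secure; the wrapped handler then runs the secure checks
+// on the request only (see secureHeader.ServeHTTP) before calling next.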
+func newSecure(next http.Handler, headers config.Headers) *secureHeader { + opt := secure.Options{ + BrowserXssFilter: headers.BrowserXSSFilter, + ContentTypeNosniff: headers.ContentTypeNosniff, + ForceSTSHeader: headers.ForceSTSHeader, + FrameDeny: headers.FrameDeny, + IsDevelopment: headers.IsDevelopment, + SSLRedirect: headers.SSLRedirect, + SSLForceHost: headers.SSLForceHost, + SSLTemporaryRedirect: headers.SSLTemporaryRedirect, + STSIncludeSubdomains: headers.STSIncludeSubdomains, + STSPreload: headers.STSPreload, + ContentSecurityPolicy: headers.ContentSecurityPolicy, + CustomBrowserXssValue: headers.CustomBrowserXSSValue, + CustomFrameOptionsValue: headers.CustomFrameOptionsValue, + PublicKey: headers.PublicKey, + ReferrerPolicy: headers.ReferrerPolicy, + SSLHost: headers.SSLHost, + AllowedHosts: headers.AllowedHosts, + HostsProxyHeaders: headers.HostsProxyHeaders, + SSLProxyHeaders: headers.SSLProxyHeaders, + STSSeconds: headers.STSSeconds, + } + + return &secureHeader{ + next: next, + secure: secure.New(opt), + } +} + +func (s secureHeader) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + s.secure.HandlerFuncWithNextForRequestOnly(rw, req, s.next.ServeHTTP) +} + +// Header is a middleware that helps setup a few basic security features. A single headerOptions struct can be +// provided to configure which features should be enabled, and the ability to override a few of the default values. +type header struct { + next http.Handler + // If Custom request headers are set, these will be added to the request + customRequestHeaders map[string]string +} + +// NewHeader constructs a new header instance from supplied frontend header struct. +func newHeader(next http.Handler, headers config.Headers) *header { + return &header{ + next: next, + customRequestHeaders: headers.CustomRequestHeaders, + } +} + +func (s *header) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + s.modifyRequestHeaders(req) + s.next.ServeHTTP(rw, req) +} + +// modifyRequestHeaders set or delete request headers. 
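+// A header configured with an empty value is removed from the request; any
+// other value overwrites the existing header.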
+func (s *header) modifyRequestHeaders(req *http.Request) { + // Loop through Custom request headers + for header, value := range s.customRequestHeaders { + if value == "" { + req.Header.Del(header) + } else { + req.Header.Set(header, value) + } + } +} diff --git a/middlewares/headers/headers_test.go b/middlewares/headers/headers_test.go new file mode 100644 index 000000000..e788f7b67 --- /dev/null +++ b/middlewares/headers/headers_test.go @@ -0,0 +1,105 @@ +package headers + +// Middleware tests based on https://github.com/unrolled/secure + +import ( + "context" + "net/http" + "net/http/httptest" + "testing" + + "github.com/containous/traefik/config" + "github.com/containous/traefik/testhelpers" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestCustomRequestHeader(t *testing.T) { + emptyHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}) + + header := newHeader(emptyHandler, config.Headers{ + CustomRequestHeaders: map[string]string{ + "X-Custom-Request-Header": "test_request", + }, + }) + + res := httptest.NewRecorder() + req := testhelpers.MustNewRequest(http.MethodGet, "/foo", nil) + + header.ServeHTTP(res, req) + + assert.Equal(t, http.StatusOK, res.Code) + assert.Equal(t, "test_request", req.Header.Get("X-Custom-Request-Header")) +} + +func TestCustomRequestHeaderEmptyValue(t *testing.T) { + emptyHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}) + + header := newHeader(emptyHandler, config.Headers{ + CustomRequestHeaders: map[string]string{ + "X-Custom-Request-Header": "test_request", + }, + }) + + res := httptest.NewRecorder() + req := testhelpers.MustNewRequest(http.MethodGet, "/foo", nil) + + header.ServeHTTP(res, req) + + assert.Equal(t, http.StatusOK, res.Code) + assert.Equal(t, "test_request", req.Header.Get("X-Custom-Request-Header")) + + header = newHeader(emptyHandler, config.Headers{ + CustomRequestHeaders: map[string]string{ + "X-Custom-Request-Header": "", + }, + }) + + header.ServeHTTP(res, req) + + assert.Equal(t, http.StatusOK, res.Code) + assert.Equal(t, "", req.Header.Get("X-Custom-Request-Header")) +} + +func TestSecureHeader(t *testing.T) { + testCases := []struct { + desc string + fromHost string + expected int + }{ + { + desc: "Should accept the request when given a host that is in the list", + fromHost: "foo.com", + expected: http.StatusOK, + }, + { + desc: "Should refuse the request when no host is given", + fromHost: "", + expected: http.StatusInternalServerError, + }, + { + desc: "Should refuse the request when no matching host is given", + fromHost: "boo.com", + expected: http.StatusInternalServerError, + }, + } + + emptyHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}) + header, err := New(context.Background(), emptyHandler, config.Headers{ + AllowedHosts: []string{"foo.com", "bar.com"}, + }, "foo") + require.NoError(t, err) + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + res := httptest.NewRecorder() + req := testhelpers.MustNewRequest(http.MethodGet, "/foo", nil) + req.Host = test.fromHost + header.ServeHTTP(res, req) + assert.Equal(t, test.expected, res.Code) + }) + } +} diff --git a/middlewares/ipwhitelist/ip_whitelist.go b/middlewares/ipwhitelist/ip_whitelist.go new file mode 100644 index 000000000..a96a5d137 --- /dev/null +++ b/middlewares/ipwhitelist/ip_whitelist.go @@ -0,0 +1,85 @@ +package ipwhitelist + +import ( + "context" + "fmt" + "net/http" + + 
"github.com/containous/traefik/config" + "github.com/containous/traefik/ip" + "github.com/containous/traefik/middlewares" + "github.com/containous/traefik/tracing" + "github.com/opentracing/opentracing-go/ext" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +const ( + typeName = "IPWhiteLister" +) + +// ipWhiteLister is a middleware that provides Checks of the Requesting IP against a set of Whitelists +type ipWhiteLister struct { + next http.Handler + whiteLister *ip.Checker + strategy ip.Strategy + name string +} + +// New builds a new IPWhiteLister given a list of CIDR-Strings to whitelist +func New(ctx context.Context, next http.Handler, config config.IPWhiteList, name string) (http.Handler, error) { + logger := middlewares.GetLogger(ctx, name, typeName) + logger.Debug("Creating middleware") + + if len(config.SourceRange) == 0 { + return nil, errors.New("sourceRange is empty, IPWhiteLister not created") + } + + checker, err := ip.NewChecker(config.SourceRange) + if err != nil { + return nil, fmt.Errorf("cannot parse CIDR whitelist %s: %v", config.SourceRange, err) + } + + strategy, err := config.IPStrategy.Get() + if err != nil { + return nil, err + } + + logger.Debugf("Setting up IPWhiteLister with sourceRange: %s", config.SourceRange) + return &ipWhiteLister{ + strategy: strategy, + whiteLister: checker, + next: next, + name: name, + }, nil +} + +func (wl *ipWhiteLister) GetTracingInformation() (string, ext.SpanKindEnum) { + return wl.name, tracing.SpanKindNoneEnum +} + +func (wl *ipWhiteLister) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + logger := middlewares.GetLogger(req.Context(), wl.name, typeName) + + err := wl.whiteLister.IsAuthorized(wl.strategy.GetIP(req)) + if err != nil { + logMessage := fmt.Sprintf("rejecting request %+v: %v", req, err) + logger.Debug(logMessage) + tracing.SetErrorWithEvent(req, logMessage) + reject(logger, rw) + return + } + logger.Debugf("Accept %s: %+v", wl.strategy.GetIP(req), req) + + wl.next.ServeHTTP(rw, req) +} + +func reject(logger logrus.FieldLogger, rw http.ResponseWriter) { + statusCode := http.StatusForbidden + + rw.WriteHeader(statusCode) + _, err := rw.Write([]byte(http.StatusText(statusCode))) + if err != nil { + logger.Error(err) + } +} diff --git a/middlewares/ipwhitelist/ip_whitelist_test.go b/middlewares/ipwhitelist/ip_whitelist_test.go new file mode 100644 index 000000000..7f525d32e --- /dev/null +++ b/middlewares/ipwhitelist/ip_whitelist_test.go @@ -0,0 +1,100 @@ +package ipwhitelist + +import ( + "context" + "net/http" + "net/http/httptest" + "testing" + + "github.com/containous/traefik/config" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNewIPWhiteLister(t *testing.T) { + testCases := []struct { + desc string + whiteList config.IPWhiteList + expectedError bool + }{ + { + desc: "invalid IP", + whiteList: config.IPWhiteList{ + SourceRange: []string{"foo"}, + }, + expectedError: true, + }, + { + desc: "valid IP", + whiteList: config.IPWhiteList{ + SourceRange: []string{"10.10.10.10"}, + }, + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}) + whiteLister, err := New(context.Background(), next, test.whiteList, "traefikTest") + + if test.expectedError { + assert.Error(t, err) + } else { + require.NoError(t, err) + assert.NotNil(t, whiteLister) + } + }) + } +} + +func TestIPWhiteLister_ServeHTTP(t *testing.T) { + testCases 
:= []struct { + desc string + whiteList config.IPWhiteList + remoteAddr string + expected int + }{ + { + desc: "authorized with remote address", + whiteList: config.IPWhiteList{ + SourceRange: []string{"20.20.20.20"}, + }, + remoteAddr: "20.20.20.20:1234", + expected: 200, + }, + { + desc: "non authorized with remote address", + whiteList: config.IPWhiteList{ + SourceRange: []string{"20.20.20.20"}, + }, + remoteAddr: "20.20.20.21:1234", + expected: 403, + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}) + whiteLister, err := New(context.Background(), next, test.whiteList, "traefikTest") + require.NoError(t, err) + + recorder := httptest.NewRecorder() + + req := httptest.NewRequest(http.MethodGet, "http://10.10.10.10", nil) + + if len(test.remoteAddr) > 0 { + req.RemoteAddr = test.remoteAddr + } + + whiteLister.ServeHTTP(recorder, req) + + assert.Equal(t, test.expected, recorder.Code) + }) + } +} diff --git a/middlewares/maxconnection/max_connection.go b/middlewares/maxconnection/max_connection.go new file mode 100644 index 000000000..d32615fd8 --- /dev/null +++ b/middlewares/maxconnection/max_connection.go @@ -0,0 +1,48 @@ +package maxconnection + +import ( + "context" + "fmt" + "net/http" + + "github.com/containous/traefik/config" + "github.com/containous/traefik/middlewares" + "github.com/containous/traefik/tracing" + "github.com/opentracing/opentracing-go/ext" + "github.com/vulcand/oxy/connlimit" + "github.com/vulcand/oxy/utils" +) + +const ( + typeName = "MaxConnection" +) + +type maxConnection struct { + handler http.Handler + name string +} + +// New creates a max connection middleware. +func New(ctx context.Context, next http.Handler, maxConns config.MaxConn, name string) (http.Handler, error) { + middlewares.GetLogger(ctx, name, typeName).Debug("Creating middleware") + + extractFunc, err := utils.NewExtractor(maxConns.ExtractorFunc) + if err != nil { + return nil, fmt.Errorf("error creating connection limit: %v", err) + } + + handler, err := connlimit.New(next, extractFunc, maxConns.Amount) + if err != nil { + return nil, fmt.Errorf("error creating connection limit: %v", err) + } + + return &maxConnection{handler: handler, name: name}, nil +} + +func (mc *maxConnection) GetTracingInformation() (string, ext.SpanKindEnum) { + return mc.name, tracing.SpanKindNoneEnum +} + +func (mc *maxConnection) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + mc.handler.ServeHTTP(rw, req) +} diff --git a/middlewares/metrics.go b/middlewares/metrics.go deleted file mode 100644 index d16fd4402..000000000 --- a/middlewares/metrics.go +++ /dev/null @@ -1,131 +0,0 @@ -package middlewares - -import ( - "net/http" - "strconv" - "strings" - "sync/atomic" - "time" - "unicode/utf8" - - "github.com/containous/traefik/log" - "github.com/containous/traefik/metrics" - gokitmetrics "github.com/go-kit/kit/metrics" - "github.com/urfave/negroni" -) - -const ( - protoHTTP = "http" - protoSSE = "sse" - protoWebsocket = "websocket" -) - -// NewEntryPointMetricsMiddleware creates a new metrics middleware for an Entrypoint. 
-func NewEntryPointMetricsMiddleware(registry metrics.Registry, entryPointName string) negroni.Handler { - return &metricsMiddleware{ - reqsCounter: registry.EntrypointReqsCounter(), - reqDurationHistogram: registry.EntrypointReqDurationHistogram(), - openConnsGauge: registry.EntrypointOpenConnsGauge(), - baseLabels: []string{"entrypoint", entryPointName}, - } -} - -// NewBackendMetricsMiddleware creates a new metrics middleware for a Backend. -func NewBackendMetricsMiddleware(registry metrics.Registry, backendName string) negroni.Handler { - return &metricsMiddleware{ - reqsCounter: registry.BackendReqsCounter(), - reqDurationHistogram: registry.BackendReqDurationHistogram(), - openConnsGauge: registry.BackendOpenConnsGauge(), - baseLabels: []string{"backend", backendName}, - } -} - -type metricsMiddleware struct { - // Important: Since this int64 field is using sync/atomic, it has to be at the top of the struct due to a bug on 32-bit platform - // See: https://golang.org/pkg/sync/atomic/ for more information - openConns int64 - reqsCounter gokitmetrics.Counter - reqDurationHistogram gokitmetrics.Histogram - openConnsGauge gokitmetrics.Gauge - baseLabels []string -} - -func (m *metricsMiddleware) ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { - labels := []string{"method", getMethod(r), "protocol", getRequestProtocol(r)} - labels = append(labels, m.baseLabels...) - - openConns := atomic.AddInt64(&m.openConns, 1) - m.openConnsGauge.With(labels...).Set(float64(openConns)) - defer func(labelValues []string) { - openConns := atomic.AddInt64(&m.openConns, -1) - m.openConnsGauge.With(labelValues...).Set(float64(openConns)) - }(labels) - - start := time.Now() - recorder := &responseRecorder{rw, http.StatusOK} - next(recorder, r) - - labels = append(labels, "code", strconv.Itoa(recorder.statusCode)) - m.reqsCounter.With(labels...).Add(1) - m.reqDurationHistogram.With(labels...).Observe(time.Since(start).Seconds()) -} - -func getRequestProtocol(req *http.Request) string { - switch { - case isWebsocketRequest(req): - return protoWebsocket - case isSSERequest(req): - return protoSSE - default: - return protoHTTP - } -} - -// isWebsocketRequest determines if the specified HTTP request is a websocket handshake request. -func isWebsocketRequest(req *http.Request) bool { - return containsHeader(req, "Connection", "upgrade") && containsHeader(req, "Upgrade", "websocket") -} - -// isSSERequest determines if the specified HTTP request is a request for an event subscription. -func isSSERequest(req *http.Request) bool { - return containsHeader(req, "Accept", "text/event-stream") -} - -func containsHeader(req *http.Request, name, value string) bool { - items := strings.Split(req.Header.Get(name), ",") - for _, item := range items { - if value == strings.ToLower(strings.TrimSpace(item)) { - return true - } - } - return false -} - -func getMethod(r *http.Request) string { - if !utf8.ValidString(r.Method) { - log.Warnf("Invalid HTTP method encoding: %s", r.Method) - return "NON_UTF8_HTTP_METHOD" - } - return r.Method -} - -type retryMetrics interface { - BackendRetriesCounter() gokitmetrics.Counter -} - -// NewMetricsRetryListener instantiates a MetricsRetryListener with the given retryMetrics. 
-func NewMetricsRetryListener(retryMetrics retryMetrics, backendName string) RetryListener { - return &MetricsRetryListener{retryMetrics: retryMetrics, backendName: backendName} -} - -// MetricsRetryListener is an implementation of the RetryListener interface to -// record RequestMetrics about retry attempts. -type MetricsRetryListener struct { - retryMetrics retryMetrics - backendName string -} - -// Retried tracks the retry in the RequestMetrics implementation. -func (m *MetricsRetryListener) Retried(req *http.Request, attempt int) { - m.retryMetrics.BackendRetriesCounter().With("backend", m.backendName).Add(1) -} diff --git a/middlewares/metrics_test.go b/middlewares/metrics_test.go deleted file mode 100644 index e803e454e..000000000 --- a/middlewares/metrics_test.go +++ /dev/null @@ -1,42 +0,0 @@ -package middlewares - -import ( - "net/http" - "net/http/httptest" - "reflect" - "testing" - - "github.com/containous/traefik/testhelpers" - "github.com/go-kit/kit/metrics" -) - -func TestMetricsRetryListener(t *testing.T) { - req := httptest.NewRequest(http.MethodGet, "/", nil) - retryMetrics := newCollectingRetryMetrics() - retryListener := NewMetricsRetryListener(retryMetrics, "backendName") - retryListener.Retried(req, 1) - retryListener.Retried(req, 2) - - wantCounterValue := float64(2) - if retryMetrics.retriesCounter.CounterValue != wantCounterValue { - t.Errorf("got counter value of %f, want %f", retryMetrics.retriesCounter.CounterValue, wantCounterValue) - } - - wantLabelValues := []string{"backend", "backendName"} - if !reflect.DeepEqual(retryMetrics.retriesCounter.LastLabelValues, wantLabelValues) { - t.Errorf("wrong label values %v used, want %v", retryMetrics.retriesCounter.LastLabelValues, wantLabelValues) - } -} - -// collectingRetryMetrics is an implementation of the retryMetrics interface that can be used inside tests to collect the times Add() was called. -type collectingRetryMetrics struct { - retriesCounter *testhelpers.CollectingCounter -} - -func newCollectingRetryMetrics() *collectingRetryMetrics { - return &collectingRetryMetrics{retriesCounter: &testhelpers.CollectingCounter{}} -} - -func (metrics *collectingRetryMetrics) BackendRetriesCounter() metrics.Counter { - return metrics.retriesCounter -} diff --git a/middlewares/middleware.go b/middlewares/middleware.go new file mode 100644 index 000000000..c3cd9df29 --- /dev/null +++ b/middlewares/middleware.go @@ -0,0 +1,13 @@ +package middlewares + +import ( + "context" + + "github.com/containous/traefik/log" + "github.com/sirupsen/logrus" +) + +// GetLogger creates a logger configured with the middleware fields. 
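+// It enriches the logger stored in ctx with the middleware name and type,
+// for example (with illustrative names):
+//
+//	logger := middlewares.GetLogger(ctx, "my-middleware", "MyType")
+//	logger.Debug("Creating middleware")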
+func GetLogger(ctx context.Context, middleware string, middlewareType string) logrus.FieldLogger { + return log.FromContext(ctx).WithField(log.MiddlewareName, middleware).WithField(log.MiddlewareType, middlewareType) +} diff --git a/middlewares/passtlsclientcert/pass_tls_client_cert.go b/middlewares/passtlsclientcert/pass_tls_client_cert.go new file mode 100644 index 000000000..32823b61b --- /dev/null +++ b/middlewares/passtlsclientcert/pass_tls_client_cert.go @@ -0,0 +1,253 @@ +package passtlsclientcert + +import ( + "context" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "fmt" + "net/http" + "net/url" + "strings" + + "github.com/containous/traefik/config" + "github.com/containous/traefik/middlewares" + "github.com/containous/traefik/tracing" + "github.com/opentracing/opentracing-go/ext" + "github.com/sirupsen/logrus" +) + +const ( + xForwardedTLSClientCert = "X-Forwarded-Tls-Client-Cert" + xForwardedTLSClientCertInfos = "X-Forwarded-Tls-Client-Cert-infos" + typeName = "PassClientTLSCert" +) + +// passTLSClientCert is a middleware that helps setup a few tls info features. +type passTLSClientCert struct { + next http.Handler + name string + pem bool // pass the sanitized pem to the backend in a specific header + infos *tlsClientCertificateInfos // pass selected information from the client certificate +} + +// New constructs a new PassTLSClientCert instance from supplied frontend header struct. +func New(ctx context.Context, next http.Handler, config config.PassTLSClientCert, name string) (http.Handler, error) { + middlewares.GetLogger(ctx, name, typeName).Debug("Creating middleware") + + return &passTLSClientCert{ + next: next, + name: name, + pem: config.PEM, + infos: newTLSClientInfos(config.Infos), + }, nil +} + +// tlsClientCertificateInfos is a struct for specifying the configuration for the passTLSClientCert middleware. +type tlsClientCertificateInfos struct { + notAfter bool + notBefore bool + subject *tlsCLientCertificateSubjectInfos + sans bool +} + +func newTLSClientInfos(infos *config.TLSClientCertificateInfos) *tlsClientCertificateInfos { + if infos == nil { + return nil + } + + return &tlsClientCertificateInfos{ + notBefore: infos.NotBefore, + notAfter: infos.NotAfter, + sans: infos.Sans, + subject: newTLSCLientCertificateSubjectInfos(infos.Subject), + } +} + +// tlsCLientCertificateSubjectInfos contains the configuration for the certificate subject infos. +type tlsCLientCertificateSubjectInfos struct { + country bool + province bool + locality bool + Organization bool + commonName bool + serialNumber bool +} + +func newTLSCLientCertificateSubjectInfos(infos *config.TLSCLientCertificateSubjectInfos) *tlsCLientCertificateSubjectInfos { + if infos == nil { + return nil + } + + return &tlsCLientCertificateSubjectInfos{ + serialNumber: infos.SerialNumber, + commonName: infos.CommonName, + country: infos.Country, + locality: infos.Locality, + Organization: infos.Organization, + province: infos.Province, + } +} + +func (p *passTLSClientCert) GetTracingInformation() (string, ext.SpanKindEnum) { + return p.name, tracing.SpanKindNoneEnum +} + +func (p *passTLSClientCert) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + logger := middlewares.GetLogger(req.Context(), p.name, typeName) + p.modifyRequestHeaders(logger, req) + p.next.ServeHTTP(rw, req) +} + +// getSubjectInfos extract the requested information from the certificate subject. 
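+// Only the subject fields enabled in the middleware configuration are kept;
+// the result has the form Subject="C=...,ST=...,L=...,O=...,CN=..." and is
+// empty when nothing is selected.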
+func (p *passTLSClientCert) getSubjectInfos(cs *pkix.Name) string { + var subject string + + if p.infos != nil && p.infos.subject != nil { + options := p.infos.subject + + var content []string + + if options.country && len(cs.Country) > 0 { + content = append(content, fmt.Sprintf("C=%s", cs.Country[0])) + } + + if options.province && len(cs.Province) > 0 { + content = append(content, fmt.Sprintf("ST=%s", cs.Province[0])) + } + + if options.locality && len(cs.Locality) > 0 { + content = append(content, fmt.Sprintf("L=%s", cs.Locality[0])) + } + + if options.Organization && len(cs.Organization) > 0 { + content = append(content, fmt.Sprintf("O=%s", cs.Organization[0])) + } + + if options.commonName && len(cs.CommonName) > 0 { + content = append(content, fmt.Sprintf("CN=%s", cs.CommonName)) + } + + if len(content) > 0 { + subject = `Subject="` + strings.Join(content, ",") + `"` + } + } + + return subject +} + +// getXForwardedTLSClientCertInfos Build a string with the wanted client certificates information +// like Subject="C=%s,ST=%s,L=%s,O=%s,CN=%s",NB=%d,NA=%d,SAN=%s; +func (p *passTLSClientCert) getXForwardedTLSClientCertInfos(certs []*x509.Certificate) string { + var headerValues []string + + for _, peerCert := range certs { + var values []string + var sans string + var nb string + var na string + + subject := p.getSubjectInfos(&peerCert.Subject) + if len(subject) > 0 { + values = append(values, subject) + } + + ci := p.infos + if ci != nil { + if ci.notBefore { + nb = fmt.Sprintf("NB=%d", uint64(peerCert.NotBefore.Unix())) + values = append(values, nb) + } + if ci.notAfter { + na = fmt.Sprintf("NA=%d", uint64(peerCert.NotAfter.Unix())) + values = append(values, na) + } + + if ci.sans { + sans = fmt.Sprintf("SAN=%s", strings.Join(getSANs(peerCert), ",")) + values = append(values, sans) + } + } + + value := strings.Join(values, ",") + headerValues = append(headerValues, value) + } + + return strings.Join(headerValues, ";") +} + +// modifyRequestHeaders set the wanted headers with the certificates information. +func (p *passTLSClientCert) modifyRequestHeaders(logger logrus.FieldLogger, r *http.Request) { + if p.pem { + if r.TLS != nil && len(r.TLS.PeerCertificates) > 0 { + r.Header.Set(xForwardedTLSClientCert, getXForwardedTLSClientCert(logger, r.TLS.PeerCertificates)) + } else { + logger.Warn("Try to extract certificate on a request without TLS") + } + } + + if p.infos != nil { + if r.TLS != nil && len(r.TLS.PeerCertificates) > 0 { + headerContent := p.getXForwardedTLSClientCertInfos(r.TLS.PeerCertificates) + r.Header.Set(xForwardedTLSClientCertInfos, url.QueryEscape(headerContent)) + } else { + logger.Warn("Try to extract certificate on a request without TLS") + } + } +} + +// sanitize As we pass the raw certificates, remove the useless data and make it http request compliant. +func sanitize(cert []byte) string { + s := string(cert) + r := strings.NewReplacer("-----BEGIN CERTIFICATE-----", "", + "-----END CERTIFICATE-----", "", + "\n", "") + cleaned := r.Replace(s) + + return url.QueryEscape(cleaned) +} + +// extractCertificate extract the certificate from the request. +func extractCertificate(logger logrus.FieldLogger, cert *x509.Certificate) string { + b := pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw} + certPEM := pem.EncodeToMemory(&b) + if certPEM == nil { + logger.Error("Cannot extract the certificate content") + return "" + } + return sanitize(certPEM) +} + +// getXForwardedTLSClientCert Build a string with the client certificates. 
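+// Each peer certificate is re-encoded to PEM, stripped of its BEGIN/END
+// delimiters and newlines, URL-escaped, and the results are joined with
+// commas.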
+func getXForwardedTLSClientCert(logger logrus.FieldLogger, certs []*x509.Certificate) string { + var headerValues []string + + for _, peerCert := range certs { + headerValues = append(headerValues, extractCertificate(logger, peerCert)) + } + + return strings.Join(headerValues, ",") +} + +// getSANs get the Subject Alternate Name values. +func getSANs(cert *x509.Certificate) []string { + var sans []string + if cert == nil { + return sans + } + + sans = append(cert.DNSNames, cert.EmailAddresses...) + + var ips []string + for _, ip := range cert.IPAddresses { + ips = append(ips, ip.String()) + } + sans = append(sans, ips...) + + var uris []string + for _, uri := range cert.URIs { + uris = append(uris, uri.String()) + } + + return append(sans, uris...) +} diff --git a/middlewares/passtlsclientcert/pass_tls_client_cert_test.go b/middlewares/passtlsclientcert/pass_tls_client_cert_test.go new file mode 100644 index 000000000..1b5170bc8 --- /dev/null +++ b/middlewares/passtlsclientcert/pass_tls_client_cert_test.go @@ -0,0 +1,505 @@ +package passtlsclientcert + +import ( + "context" + "crypto/tls" + "crypto/x509" + "encoding/pem" + "net" + "net/http" + "net/http/httptest" + "net/url" + "regexp" + "strings" + "testing" + + "github.com/containous/traefik/config" + "github.com/containous/traefik/testhelpers" + "github.com/stretchr/testify/require" +) + +const ( + rootCrt = `-----BEGIN CERTIFICATE----- +MIIDhjCCAm6gAwIBAgIJAIKZlW9a3VrYMA0GCSqGSIb3DQEBCwUAMFgxCzAJBgNV +BAYTAkZSMRMwEQYDVQQIDApTb21lLVN0YXRlMREwDwYDVQQHDAhUb3Vsb3VzZTEh +MB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMB4XDTE4MDcxNzIwMzQz +OFoXDTE4MDgxNjIwMzQzOFowWDELMAkGA1UEBhMCRlIxEzARBgNVBAgMClNvbWUt +U3RhdGUxETAPBgNVBAcMCFRvdWxvdXNlMSEwHwYDVQQKDBhJbnRlcm5ldCBXaWRn +aXRzIFB0eSBMdGQwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC1P8GJ +H9LkIxIIqK9MyUpushnjmjwccpSMB3OecISKYLy62QDIcAw6NzGcSe8hMwciMJr+ +CdCjJlohybnaRI9hrJ3GPnI++UT/MMthf2IIcjmJxmD4k9L1fgs1V6zSTlo0+o0x +0gkAGlWvRkgA+3nt555ee84XQZuneKKeRRIlSA1ygycewFobZ/pGYijIEko+gYkV +sF3LnRGxNl673w+EQsvI7+z29T1nzjmM/xE7WlvnsrVd1/N61jAohLota0YTufwd +ioJZNryzuPejHBCiQRGMbJ7uEEZLiSCN6QiZEfqhS3AulykjgFXQQHn4zoVljSBR +UyLV0prIn5Scbks/AgMBAAGjUzBRMB0GA1UdDgQWBBTroRRnSgtkV+8dumtcftb/ +lwIkATAfBgNVHSMEGDAWgBTroRRnSgtkV+8dumtcftb/lwIkATAPBgNVHRMBAf8E +BTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQAJ67U5cLa0ZFa/7zQQT4ldkY6YOEgR +0LNoTu51hc+ozaXSvF8YIBzkEpEnbGS3x4xodrwEBZjK2LFhNu/33gkCAuhmedgk +KwZrQM6lqRFGHGVOlkVz+QrJ2EsKYaO4SCUIwVjijXRLA7A30G5C/CIh66PsMgBY +6QHXVPEWm/v1d1Q/DfFfFzSOa1n1rIUw03qVJsxqSwfwYcegOF8YvS/eH4HUr2gF +cEujh6CCnylf35ExHa45atr3+xxbOVdNjobISkYADtbhAAn4KjLS4v8W6445vxxj +G5EIZLjOHyWg1sGaHaaAPkVpZQg8EKm21c4hrEEMfel60AMSSzad/a/V +-----END CERTIFICATE-----` + + minimalCert = `-----BEGIN CERTIFICATE----- +MIIDGTCCAgECCQCqLd75YLi2kDANBgkqhkiG9w0BAQsFADBYMQswCQYDVQQGEwJG +UjETMBEGA1UECAwKU29tZS1TdGF0ZTERMA8GA1UEBwwIVG91bG91c2UxITAfBgNV +BAoMGEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0xODA3MTgwODI4MTZaFw0x +ODA4MTcwODI4MTZaMEUxCzAJBgNVBAYTAkZSMRMwEQYDVQQIDApTb21lLVN0YXRl +MSEwHwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQC/+frDMMTLQyXG34F68BPhQq0kzK4LIq9Y0/gl +FjySZNn1C0QDWA1ubVCAcA6yY204I9cxcQDPNrhC7JlS5QA8Y5rhIBrqQlzZizAi +Rj3NTrRjtGUtOScnHuJaWjLy03DWD+aMwb7q718xt5SEABmmUvLwQK+EjW2MeDwj +y8/UEIpvrRDmdhGaqv7IFpIDkcIF7FowJ/hwDvx3PMc+z/JWK0ovzpvgbx69AVbw +ZxCimeha65rOqVi+lEetD26le+WnOdYsdJ2IkmpPNTXGdfb15xuAc+gFXfMCh7Iw +3Ynl6dZtZM/Ok2kiA7/OsmVnRKkWrtBfGYkI9HcNGb3zrk6nAgMBAAEwDQYJKoZI +hvcNAQELBQADggEBAC/R+Yvhh1VUhcbK49olWsk/JKqfS3VIDQYZg1Eo+JCPbwgS 
+I1BSYVfMcGzuJTX6ua3m/AHzGF3Tap4GhF4tX12jeIx4R4utnjj7/YKkTvuEM2f4 +xT56YqI7zalGScIB0iMeyNz1QcimRl+M/49au8ow9hNX8C2tcA2cwd/9OIj/6T8q +SBRHc6ojvbqZSJCO0jziGDT1L3D+EDgTjED4nd77v/NRdP+egb0q3P0s4dnQ/5AV +aQlQADUn61j3ScbGJ4NSeZFFvsl38jeRi/MEzp0bGgNBcPj6JHi7qbbauZcZfQ05 +jECvgAY7Nfd9mZ1KtyNaW31is+kag7NsvjxU/kM= +-----END CERTIFICATE-----` + + completeCert = `Certificate: + Data: + Version: 3 (0x2) + Serial Number: 3 (0x3) + Signature Algorithm: sha1WithRSAEncryption + Issuer: C=FR, ST=Some-State, L=Toulouse, O=Internet Widgits Pty Ltd + Validity + Not Before: Jul 18 08:00:16 2018 GMT + Not After : Jul 18 08:00:16 2019 GMT + subject: C=FR, ST=SomeState, L=Toulouse, O=Cheese, CN=*.cheese.org + subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:a6:1f:96:7c:c1:cc:b8:1c:b5:91:5d:b8:bf:70: + bc:f7:b8:04:4f:2a:42:de:ea:c5:c3:19:0b:03:04: + ec:ef:a1:24:25:de:ad:05:e7:26:ea:89:6c:59:60: + 10:18:0c:73:f1:bf:d3:cc:7b:ed:6b:9c:ea:1d:88: + e2:ee:14:81:d7:07:ee:87:95:3d:36:df:9c:38:b7: + 7b:1e:2b:51:9c:4a:1f:d0:cc:5b:af:5d:6c:5c:35: + 49:32:e4:01:5b:f9:8c:71:cf:62:48:5a:ea:b7:31: + 58:e2:c6:d0:5b:1c:50:b5:5c:6d:5a:6f:da:41:5e: + d5:4c:6e:1a:21:f3:40:f9:9e:52:76:50:25:3e:03: + 9b:87:19:48:5b:47:87:d3:67:c6:25:69:77:29:8e: + 56:97:45:d9:6f:64:a8:4e:ad:35:75:2e:fc:6a:2e: + 47:87:76:fc:4e:3e:44:e9:16:b2:c7:f0:23:98:13: + a2:df:15:23:cb:0c:3d:fd:48:5e:c7:2c:86:70:63: + 8b:c6:c8:89:17:52:d5:a7:8e:cb:4e:11:9d:69:8e: + 8e:59:cc:7e:a3:bd:a1:11:88:d7:cf:7b:8c:19:46: + 9c:1b:7a:c9:39:81:4c:58:08:1f:c7:ce:b0:0e:79: + 64:d3:11:72:65:e6:dd:bd:00:7f:22:30:46:9b:66: + 9c:b9 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Basic Constraints: + CA:FALSE + X509v3 subject Alternative Name: + DNS:*.cheese.org, DNS:*.cheese.net, DNS:cheese.in, IP Address:10.0.1.0, IP Address:10.0.1.2, email:test@cheese.org, email:test@cheese.net + X509v3 subject Key Identifier: + AB:6B:89:25:11:FC:5E:7B:D4:B0:F7:D4:B6:D9:EB:D0:30:93:E5:58 + Signature Algorithm: sha1WithRSAEncryption + ad:87:84:a0:88:a3:4c:d9:0a:c0:14:e4:2d:9a:1d:bb:57:b7: + 12:ef:3a:fb:8b:b2:ce:32:b8:04:e6:59:c8:4f:14:6a:b5:12: + 46:e9:c9:0a:11:64:ea:a1:86:20:96:0e:a7:40:e3:aa:e5:98: + 91:36:89:77:b6:b9:73:7e:1a:58:19:ae:d1:14:83:1e:c1:5f: + a5:a0:32:bb:52:68:b4:8d:a3:1d:b3:08:d7:45:6e:3b:87:64: + 7e:ef:46:e6:6f:d5:79:d7:1d:57:68:67:d8:18:39:61:5b:8b: + 1a:7f:88:da:0a:51:9b:3d:6c:5d:b1:cf:b7:e9:1e:06:65:8e: + 96:d3:61:96:f8:a2:61:f9:40:5e:fa:bc:76:b9:64:0e:6f:90: + 37:de:ac:6d:7f:36:84:35:19:88:8c:26:af:3e:c3:6a:1a:03: + ed:d7:90:89:ed:18:4c:9e:94:1f:d8:ae:6c:61:36:17:72:f9: + bb:de:0a:56:9a:79:b4:7d:4a:9d:cb:4a:7d:71:9f:38:e7:8d: + f0:87:24:21:0a:24:1f:82:9a:6b:67:ce:7d:af:cb:91:6b:8a: + de:e6:d8:6f:a1:37:b9:2d:d0:cb:e8:4e:f4:43:af:ad:90:13: + 7d:61:7a:ce:86:48:fc:00:8c:37:fb:e0:31:6b:e2:18:ad:fd: + 1e:df:08:db +-----BEGIN CERTIFICATE----- +MIIDvTCCAqWgAwIBAgIBAzANBgkqhkiG9w0BAQUFADBYMQswCQYDVQQGEwJGUjET +MBEGA1UECAwKU29tZS1TdGF0ZTERMA8GA1UEBwwIVG91bG91c2UxITAfBgNVBAoM +GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0xODA3MTgwODAwMTZaFw0xOTA3 +MTgwODAwMTZaMFwxCzAJBgNVBAYTAkZSMRIwEAYDVQQIDAlTb21lU3RhdGUxETAP +BgNVBAcMCFRvdWxvdXNlMQ8wDQYDVQQKDAZDaGVlc2UxFTATBgNVBAMMDCouY2hl +ZXNlLm9yZzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKYflnzBzLgc +tZFduL9wvPe4BE8qQt7qxcMZCwME7O+hJCXerQXnJuqJbFlgEBgMc/G/08x77Wuc +6h2I4u4UgdcH7oeVPTbfnDi3ex4rUZxKH9DMW69dbFw1STLkAVv5jHHPYkha6rcx +WOLG0FscULVcbVpv2kFe1UxuGiHzQPmeUnZQJT4Dm4cZSFtHh9NnxiVpdymOVpdF +2W9kqE6tNXUu/GouR4d2/E4+ROkWssfwI5gTot8VI8sMPf1IXscshnBji8bIiRdS 
+1aeOy04RnWmOjlnMfqO9oRGI1897jBlGnBt6yTmBTFgIH8fOsA55ZNMRcmXm3b0A +fyIwRptmnLkCAwEAAaOBjTCBijAJBgNVHRMEAjAAMF4GA1UdEQRXMFWCDCouY2hl +ZXNlLm9yZ4IMKi5jaGVlc2UubmV0ggljaGVlc2UuaW6HBAoAAQCHBAoAAQKBD3Rl +c3RAY2hlZXNlLm9yZ4EPdGVzdEBjaGVlc2UubmV0MB0GA1UdDgQWBBSra4klEfxe +e9Sw99S22evQMJPlWDANBgkqhkiG9w0BAQUFAAOCAQEArYeEoIijTNkKwBTkLZod +u1e3Eu86+4uyzjK4BOZZyE8UarUSRunJChFk6qGGIJYOp0DjquWYkTaJd7a5c34a +WBmu0RSDHsFfpaAyu1JotI2jHbMI10VuO4dkfu9G5m/VedcdV2hn2Bg5YVuLGn+I +2gpRmz1sXbHPt+keBmWOltNhlviiYflAXvq8drlkDm+QN96sbX82hDUZiIwmrz7D +ahoD7deQie0YTJ6UH9iubGE2F3L5u94KVpp5tH1KnctKfXGfOOeN8IckIQokH4Ka +a2fOfa/LkWuK3ubYb6E3uS3Qy+hO9EOvrZATfWF6zoZI/ACMN/vgMWviGK39Ht8I +2w== +-----END CERTIFICATE----- +` +) + +func getCleanCertContents(certContents []string) string { + var re = regexp.MustCompile("-----BEGIN CERTIFICATE-----(?s)(.*)") + + var cleanedCertContent []string + for _, certContent := range certContents { + cert := re.FindString(certContent) + cleanedCertContent = append(cleanedCertContent, sanitize([]byte(cert))) + } + + return strings.Join(cleanedCertContent, ",") +} + +func getCertificate(certContent string) *x509.Certificate { + roots := x509.NewCertPool() + ok := roots.AppendCertsFromPEM([]byte(rootCrt)) + if !ok { + panic("failed to parse root certificate") + } + + block, _ := pem.Decode([]byte(certContent)) + if block == nil { + panic("failed to parse certificate PEM") + } + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + panic("failed to parse certificate: " + err.Error()) + } + + return cert +} + +func buildTLSWith(certContents []string) *tls.ConnectionState { + var peerCertificates []*x509.Certificate + + for _, certContent := range certContents { + peerCertificates = append(peerCertificates, getCertificate(certContent)) + } + + return &tls.ConnectionState{PeerCertificates: peerCertificates} +} + +var next = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + _, err := w.Write([]byte("bar")) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } +}) + +func getExpectedSanitized(s string) string { + return url.QueryEscape(strings.Replace(s, "\n", "", -1)) +} + +func TestSanitize(t *testing.T) { + testCases := []struct { + desc string + toSanitize []byte + expected string + }{ + { + desc: "Empty", + }, + { + desc: "With a minimal cert", + toSanitize: []byte(minimalCert), + expected: getExpectedSanitized(`MIIDGTCCAgECCQCqLd75YLi2kDANBgkqhkiG9w0BAQsFADBYMQswCQYDVQQGEwJG +UjETMBEGA1UECAwKU29tZS1TdGF0ZTERMA8GA1UEBwwIVG91bG91c2UxITAfBgNV +BAoMGEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0xODA3MTgwODI4MTZaFw0x +ODA4MTcwODI4MTZaMEUxCzAJBgNVBAYTAkZSMRMwEQYDVQQIDApTb21lLVN0YXRl +MSEwHwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQC/+frDMMTLQyXG34F68BPhQq0kzK4LIq9Y0/gl +FjySZNn1C0QDWA1ubVCAcA6yY204I9cxcQDPNrhC7JlS5QA8Y5rhIBrqQlzZizAi +Rj3NTrRjtGUtOScnHuJaWjLy03DWD+aMwb7q718xt5SEABmmUvLwQK+EjW2MeDwj +y8/UEIpvrRDmdhGaqv7IFpIDkcIF7FowJ/hwDvx3PMc+z/JWK0ovzpvgbx69AVbw +ZxCimeha65rOqVi+lEetD26le+WnOdYsdJ2IkmpPNTXGdfb15xuAc+gFXfMCh7Iw +3Ynl6dZtZM/Ok2kiA7/OsmVnRKkWrtBfGYkI9HcNGb3zrk6nAgMBAAEwDQYJKoZI +hvcNAQELBQADggEBAC/R+Yvhh1VUhcbK49olWsk/JKqfS3VIDQYZg1Eo+JCPbwgS +I1BSYVfMcGzuJTX6ua3m/AHzGF3Tap4GhF4tX12jeIx4R4utnjj7/YKkTvuEM2f4 +xT56YqI7zalGScIB0iMeyNz1QcimRl+M/49au8ow9hNX8C2tcA2cwd/9OIj/6T8q +SBRHc6ojvbqZSJCO0jziGDT1L3D+EDgTjED4nd77v/NRdP+egb0q3P0s4dnQ/5AV +aQlQADUn61j3ScbGJ4NSeZFFvsl38jeRi/MEzp0bGgNBcPj6JHi7qbbauZcZfQ05 +jECvgAY7Nfd9mZ1KtyNaW31is+kag7NsvjxU/kM=`), + }, + } + + for _, test := range testCases { + 
test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + require.Equal(t, test.expected, sanitize(test.toSanitize), "The sanitized certificates should be equal") + }) + } + +} + +func TestTLSClientHeadersWithPEM(t *testing.T) { + testCases := []struct { + desc string + certContents []string // set the request TLS attribute if defined + config config.PassTLSClientCert + expectedHeader string + }{ + { + desc: "No TLS, no option", + }, + { + desc: "TLS, no option", + certContents: []string{minimalCert}, + }, + { + desc: "No TLS, with pem option true", + config: config.PassTLSClientCert{PEM: true}, + }, + { + desc: "TLS with simple certificate, with pem option true", + certContents: []string{minimalCert}, + config: config.PassTLSClientCert{PEM: true}, + expectedHeader: getCleanCertContents([]string{minimalCert}), + }, + { + desc: "TLS with complete certificate, with pem option true", + certContents: []string{completeCert}, + config: config.PassTLSClientCert{PEM: true}, + expectedHeader: getCleanCertContents([]string{completeCert}), + }, + { + desc: "TLS with two certificate, with pem option true", + certContents: []string{minimalCert, completeCert}, + config: config.PassTLSClientCert{PEM: true}, + expectedHeader: getCleanCertContents([]string{minimalCert, completeCert}), + }, + } + + for _, test := range testCases { + tlsClientHeaders, err := New(context.Background(), next, test.config, "foo") + require.NoError(t, err) + + res := httptest.NewRecorder() + req := testhelpers.MustNewRequest(http.MethodGet, "http://example.com/foo", nil) + + if test.certContents != nil && len(test.certContents) > 0 { + req.TLS = buildTLSWith(test.certContents) + } + + tlsClientHeaders.ServeHTTP(res, req) + + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + require.Equal(t, http.StatusOK, res.Code, "Http Status should be OK") + require.Equal(t, "bar", res.Body.String(), "Should be the expected body") + + if test.expectedHeader != "" { + require.Equal(t, getCleanCertContents(test.certContents), req.Header.Get(xForwardedTLSClientCert), "The request header should contain the cleaned certificate") + } else { + require.Empty(t, req.Header.Get(xForwardedTLSClientCert)) + } + require.Empty(t, res.Header().Get(xForwardedTLSClientCert), "The response header should be always empty") + }) + } + +} + +func TestGetSans(t *testing.T) { + urlFoo, err := url.Parse("my.foo.com") + require.NoError(t, err) + urlBar, err := url.Parse("my.bar.com") + require.NoError(t, err) + + testCases := []struct { + desc string + cert *x509.Certificate // set the request TLS attribute if defined + expected []string + }{ + { + desc: "With nil", + }, + { + desc: "Certificate without Sans", + cert: &x509.Certificate{}, + }, + { + desc: "Certificate with all Sans", + cert: &x509.Certificate{ + DNSNames: []string{"foo", "bar"}, + EmailAddresses: []string{"test@test.com", "test2@test.com"}, + IPAddresses: []net.IP{net.IPv4(10, 0, 0, 1), net.IPv4(10, 0, 0, 2)}, + URIs: []*url.URL{urlFoo, urlBar}, + }, + expected: []string{"foo", "bar", "test@test.com", "test2@test.com", "10.0.0.1", "10.0.0.2", urlFoo.String(), urlBar.String()}, + }, + } + + for _, test := range testCases { + sans := getSANs(test.cert) + + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + if len(test.expected) > 0 { + for i, expected := range test.expected { + require.Equal(t, expected, sans[i]) + } + } else { + require.Empty(t, sans) + } + }) + } + +} + +func TestTLSClientHeadersWithCertInfos(t *testing.T) { + minimalCertAllInfos 
:= `Subject="C=FR,ST=Some-State,O=Internet Widgits Pty Ltd",NB=1531902496,NA=1534494496,SAN=` + completeCertAllInfos := `Subject="C=FR,ST=SomeState,L=Toulouse,O=Cheese,CN=*.cheese.org",NB=1531900816,NA=1563436816,SAN=*.cheese.org,*.cheese.net,cheese.in,test@cheese.org,test@cheese.net,10.0.1.0,10.0.1.2` + + testCases := []struct { + desc string + certContents []string // set the request TLS attribute if defined + config config.PassTLSClientCert + expectedHeader string + }{ + { + desc: "No TLS, no option", + }, + { + desc: "TLS, no option", + certContents: []string{minimalCert}, + }, + { + desc: "No TLS, with pem option true", + config: config.PassTLSClientCert{ + Infos: &config.TLSClientCertificateInfos{ + Subject: &config.TLSCLientCertificateSubjectInfos{ + CommonName: true, + Organization: true, + Locality: true, + Province: true, + Country: true, + SerialNumber: true, + }, + }, + }, + }, + { + desc: "No TLS, with pem option true with no flag", + config: config.PassTLSClientCert{ + PEM: false, + Infos: &config.TLSClientCertificateInfos{ + Subject: &config.TLSCLientCertificateSubjectInfos{}, + }, + }, + }, + { + desc: "TLS with simple certificate, with all infos", + certContents: []string{minimalCert}, + config: config.PassTLSClientCert{ + Infos: &config.TLSClientCertificateInfos{ + NotAfter: true, + NotBefore: true, + Subject: &config.TLSCLientCertificateSubjectInfos{ + CommonName: true, + Organization: true, + Locality: true, + Province: true, + Country: true, + SerialNumber: true, + }, + Sans: true, + }, + }, + expectedHeader: url.QueryEscape(minimalCertAllInfos), + }, + { + desc: "TLS with simple certificate, with some infos", + certContents: []string{minimalCert}, + config: config.PassTLSClientCert{ + Infos: &config.TLSClientCertificateInfos{ + NotAfter: true, + Subject: &config.TLSCLientCertificateSubjectInfos{ + Organization: true, + }, + Sans: true, + }, + }, + expectedHeader: url.QueryEscape(`Subject="O=Internet Widgits Pty Ltd",NA=1534494496,SAN=`), + }, + { + desc: "TLS with complete certificate, with all infos", + certContents: []string{completeCert}, + config: config.PassTLSClientCert{ + Infos: &config.TLSClientCertificateInfos{ + NotAfter: true, + NotBefore: true, + Subject: &config.TLSCLientCertificateSubjectInfos{ + CommonName: true, + Organization: true, + Locality: true, + Province: true, + Country: true, + SerialNumber: true, + }, + Sans: true, + }, + }, + expectedHeader: url.QueryEscape(completeCertAllInfos), + }, + { + desc: "TLS with 2 certificates, with all infos", + certContents: []string{minimalCert, completeCert}, + config: config.PassTLSClientCert{ + Infos: &config.TLSClientCertificateInfos{ + NotAfter: true, + NotBefore: true, + Subject: &config.TLSCLientCertificateSubjectInfos{ + CommonName: true, + Organization: true, + Locality: true, + Province: true, + Country: true, + SerialNumber: true, + }, + Sans: true, + }, + }, + expectedHeader: url.QueryEscape(strings.Join([]string{minimalCertAllInfos, completeCertAllInfos}, ";")), + }, + } + + for _, test := range testCases { + tlsClientHeaders, err := New(context.Background(), next, test.config, "foo") + require.NoError(t, err) + + res := httptest.NewRecorder() + req := testhelpers.MustNewRequest(http.MethodGet, "http://example.com/foo", nil) + + if test.certContents != nil && len(test.certContents) > 0 { + req.TLS = buildTLSWith(test.certContents) + } + + tlsClientHeaders.ServeHTTP(res, req) + + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + require.Equal(t, http.StatusOK, res.Code, "Http 
Status should be OK") + require.Equal(t, "bar", res.Body.String(), "Should be the expected body") + + if test.expectedHeader != "" { + require.Equal(t, test.expectedHeader, req.Header.Get(xForwardedTLSClientCertInfos), "The request header should contain the cleaned certificate") + } else { + require.Empty(t, req.Header.Get(xForwardedTLSClientCertInfos)) + } + require.Empty(t, res.Header().Get(xForwardedTLSClientCertInfos), "The response header should be always empty") + }) + } + +} diff --git a/middlewares/ratelimiter/rate_limiter.go b/middlewares/ratelimiter/rate_limiter.go new file mode 100644 index 000000000..36fa3d18d --- /dev/null +++ b/middlewares/ratelimiter/rate_limiter.go @@ -0,0 +1,54 @@ +package ratelimiter + +import ( + "context" + "net/http" + "time" + + "github.com/containous/traefik/config" + "github.com/containous/traefik/middlewares" + "github.com/containous/traefik/tracing" + "github.com/opentracing/opentracing-go/ext" + "github.com/vulcand/oxy/ratelimit" + "github.com/vulcand/oxy/utils" +) + +const ( + typeName = "RateLimiterType" +) + +type rateLimiter struct { + handler http.Handler + name string +} + +// New creates rate limiter middleware. +func New(ctx context.Context, next http.Handler, config config.RateLimit, name string) (http.Handler, error) { + middlewares.GetLogger(ctx, name, typeName).Debug("Creating middleware") + + extractFunc, err := utils.NewExtractor(config.ExtractorFunc) + if err != nil { + return nil, err + } + + rateSet := ratelimit.NewRateSet() + for _, rate := range config.RateSet { + if err = rateSet.Add(time.Duration(rate.Period), rate.Average, rate.Burst); err != nil { + return nil, err + } + } + + rl, err := ratelimit.New(next, extractFunc, rateSet) + if err != nil { + return nil, err + } + return &rateLimiter{handler: rl, name: name}, nil +} + +func (r *rateLimiter) GetTracingInformation() (string, ext.SpanKindEnum) { + return r.name, tracing.SpanKindNoneEnum +} + +func (r *rateLimiter) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + r.handler.ServeHTTP(rw, req) +} diff --git a/middlewares/recovery/recovery.go b/middlewares/recovery/recovery.go new file mode 100644 index 000000000..28886e5eb --- /dev/null +++ b/middlewares/recovery/recovery.go @@ -0,0 +1,40 @@ +package recovery + +import ( + "context" + "net/http" + + "github.com/containous/traefik/middlewares" + "github.com/sirupsen/logrus" +) + +const ( + typeName = "Recovery" +) + +type recovery struct { + next http.Handler + name string +} + +// New creates recovery middleware. 
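+// The returned handler recovers from panics raised by the next handler, logs
+// the panic value, and replies with a plain 500 Internal Server Error.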
+func New(ctx context.Context, next http.Handler, name string) (http.Handler, error) { + middlewares.GetLogger(ctx, name, typeName).Debug("Creating middleware") + + return &recovery{ + next: next, + name: name, + }, nil +} + +func (re *recovery) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + defer recoverFunc(middlewares.GetLogger(req.Context(), re.name, typeName), rw) + re.next.ServeHTTP(rw, req) +} + +func recoverFunc(logger logrus.FieldLogger, rw http.ResponseWriter) { + if err := recover(); err != nil { + logger.Errorf("Recovered from panic in http handler: %+v", err) + http.Error(rw, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError) + } +} diff --git a/middlewares/recovery/recovery_test.go b/middlewares/recovery/recovery_test.go new file mode 100644 index 000000000..0871f3909 --- /dev/null +++ b/middlewares/recovery/recovery_test.go @@ -0,0 +1,27 @@ +package recovery + +import ( + "context" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestRecoverHandler(t *testing.T) { + fn := func(w http.ResponseWriter, r *http.Request) { + panic("I love panicing!") + } + recovery, err := New(context.Background(), http.HandlerFunc(fn), "foo-recovery") + require.NoError(t, err) + + server := httptest.NewServer(recovery) + defer server.Close() + + resp, err := http.Get(server.URL) + require.NoError(t, err) + + assert.Equal(t, http.StatusInternalServerError, resp.StatusCode) +} diff --git a/middlewares/redirect/redirect.go b/middlewares/redirect/redirect.go index 506d7db3f..46de16fa3 100644 --- a/middlewares/redirect/redirect.go +++ b/middlewares/redirect/redirect.go @@ -2,110 +2,87 @@ package redirect import ( "bytes" - "fmt" + "context" + "html/template" "io" "net/http" "net/url" "regexp" "strings" - "text/template" - "github.com/containous/traefik/configuration" + "github.com/containous/traefik/config" "github.com/containous/traefik/middlewares" - "github.com/urfave/negroni" + "github.com/containous/traefik/tracing" + "github.com/opentracing/opentracing-go/ext" "github.com/vulcand/oxy/utils" ) const ( - defaultRedirectRegex = `^(?:https?:\/\/)?([\w\._-]+)(?::\d+)?(.*)$` + typeName = "Redirect" ) -// NewEntryPointHandler create a new redirection handler base on entry point -func NewEntryPointHandler(dstEntryPoint *configuration.EntryPoint, permanent bool) (negroni.Handler, error) { - exp := regexp.MustCompile(`(:\d+)`) - match := exp.FindStringSubmatch(dstEntryPoint.Address) - if len(match) == 0 { - return nil, fmt.Errorf("bad Address format %q", dstEntryPoint.Address) - } - - protocol := "http" - if dstEntryPoint.TLS != nil { - protocol = "https" - } - - replacement := protocol + "://${1}" + match[0] + "${2}" - - return NewRegexHandler(defaultRedirectRegex, replacement, permanent) +type redirect struct { + next http.Handler + regex *regexp.Regexp + replacement string + permanent bool + errHandler utils.ErrorHandler + name string } -// NewRegexHandler create a new redirection handler base on regex -func NewRegexHandler(exp string, replacement string, permanent bool) (negroni.Handler, error) { - re, err := regexp.Compile(exp) +// New creates a redirect middleware. 
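With the entry-point-specific constructor removed, an HTTP-to-HTTPS redirect is now expressed purely through config.Redirect and the constructor defined just below. A hedged sketch of that mapping; the regex/replacement pair and the middleware name are illustrative values, not taken from the patch.

package example

import (
	"context"
	"net/http"

	"github.com/containous/traefik/config"
	"github.com/containous/traefik/middlewares/redirect"
)

// httpsRedirect is an assumed sketch of the config-driven equivalent of the removed
// NewEntryPointHandler.
func httpsRedirect(ctx context.Context, next http.Handler) (http.Handler, error) {
	cfg := config.Redirect{
		Regex:       `^http://(.*)`, // capture everything after the scheme
		Replacement: "https://$1",
		Permanent:   true, // 301 instead of 302
	}
	return redirect.New(ctx, next, cfg, "https-redirect")
}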
+func New(ctx context.Context, next http.Handler, config config.Redirect, name string) (http.Handler, error) { + logger := middlewares.GetLogger(ctx, name, typeName) + logger.Debug("Creating middleware") + logger.Debugf("Setting up redirect %s -> %s", config.Regex, config.Replacement) + + re, err := regexp.Compile(config.Regex) if err != nil { return nil, err } - return &handler{ - regexp: re, - replacement: replacement, - permanent: permanent, + return &redirect{ + regex: re, + replacement: config.Replacement, + permanent: config.Permanent, errHandler: utils.DefaultHandler, + next: next, + name: name, }, nil } -type handler struct { - regexp *regexp.Regexp - replacement string - permanent bool - errHandler utils.ErrorHandler +func (r *redirect) GetTracingInformation() (string, ext.SpanKindEnum) { + return r.name, tracing.SpanKindNoneEnum } -func (h *handler) ServeHTTP(rw http.ResponseWriter, req *http.Request, next http.HandlerFunc) { +func (r *redirect) ServeHTTP(rw http.ResponseWriter, req *http.Request) { oldURL := rawURL(req) - // only continue if the Regexp param matches the URL - if !h.regexp.MatchString(oldURL) { - next.ServeHTTP(rw, req) + // If the Regexp doesn't match, skip to the next handler + if !r.regex.MatchString(oldURL) { + r.next.ServeHTTP(rw, req) return } // apply a rewrite regexp to the URL - newURL := h.regexp.ReplaceAllString(oldURL, h.replacement) + newURL := r.regex.ReplaceAllString(oldURL, r.replacement) // replace any variables that may be in there rewrittenURL := &bytes.Buffer{} if err := applyString(newURL, rewrittenURL, req); err != nil { - h.errHandler.ServeHTTP(rw, req, err) + r.errHandler.ServeHTTP(rw, req, err) return } // parse the rewritten URL and replace request URL with it parsedURL, err := url.Parse(rewrittenURL.String()) if err != nil { - h.errHandler.ServeHTTP(rw, req, err) + r.errHandler.ServeHTTP(rw, req, err) return } - if stripPrefix, stripPrefixOk := req.Context().Value(middlewares.StripPrefixKey).(string); stripPrefixOk { - if len(stripPrefix) > 0 { - parsedURL.Path = stripPrefix - } - } - - if addPrefix, addPrefixOk := req.Context().Value(middlewares.AddPrefixKey).(string); addPrefixOk { - if len(addPrefix) > 0 { - parsedURL.Path = strings.Replace(parsedURL.Path, addPrefix, "", 1) - } - } - - if replacePath, replacePathOk := req.Context().Value(middlewares.ReplacePathKey).(string); replacePathOk { - if len(replacePath) > 0 { - parsedURL.Path = replacePath - } - } - if newURL != oldURL { - handler := &moveHandler{location: parsedURL, permanent: h.permanent} + handler := &moveHandler{location: parsedURL, permanent: r.permanent} handler.ServeHTTP(rw, req) return } @@ -114,7 +91,7 @@ func (h *handler) ServeHTTP(rw http.ResponseWriter, req *http.Request, next http // make sure the request URI corresponds the rewritten URL req.RequestURI = req.URL.RequestURI() - next.ServeHTTP(rw, req) + r.next.ServeHTTP(rw, req) } type moveHandler struct { @@ -124,21 +101,25 @@ type moveHandler struct { func (m *moveHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { rw.Header().Set("Location", m.location.String()) + status := http.StatusFound if m.permanent { status = http.StatusMovedPermanently } rw.WriteHeader(status) - rw.Write([]byte(http.StatusText(status))) + _, err := rw.Write([]byte(http.StatusText(status))) + if err != nil { + http.Error(rw, err.Error(), http.StatusInternalServerError) + } } -func rawURL(request *http.Request) string { +func rawURL(req *http.Request) string { scheme := "http" - if request.TLS != nil || 
isXForwardedHTTPS(request) { + if req.TLS != nil || isXForwardedHTTPS(req) { scheme = "https" } - return strings.Join([]string{scheme, "://", request.Host, request.RequestURI}, "") + return strings.Join([]string{scheme, "://", req.Host, req.RequestURI}, "") } func isXForwardedHTTPS(request *http.Request) bool { @@ -147,17 +128,13 @@ func isXForwardedHTTPS(request *http.Request) bool { return len(xForwardedProto) > 0 && xForwardedProto == "https" } -func applyString(in string, out io.Writer, request *http.Request) error { +func applyString(in string, out io.Writer, req *http.Request) error { t, err := template.New("t").Parse(in) if err != nil { return err } - data := struct { - Request *http.Request - }{ - Request: request, - } + data := struct{ Request *http.Request }{Request: req} return t.Execute(out, data) } diff --git a/middlewares/redirect/redirect_test.go b/middlewares/redirect/redirect_test.go index ab190daca..cee75670b 100644 --- a/middlewares/redirect/redirect_test.go +++ b/middlewares/redirect/redirect_test.go @@ -1,146 +1,129 @@ package redirect import ( + "context" + "crypto/tls" "net/http" "net/http/httptest" "testing" - "github.com/containous/traefik/configuration" + "github.com/containous/traefik/config" "github.com/containous/traefik/testhelpers" - "github.com/containous/traefik/tls" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func TestNewEntryPointHandler(t *testing.T) { - testCases := []struct { - desc string - entryPoint *configuration.EntryPoint - permanent bool - url string - expectedURL string - expectedStatus int - errorExpected bool - }{ - { - desc: "HTTP to HTTPS", - entryPoint: &configuration.EntryPoint{Address: ":443", TLS: &tls.TLS{}}, - url: "http://foo:80", - expectedURL: "https://foo:443", - expectedStatus: http.StatusFound, - }, - { - desc: "HTTPS to HTTP", - entryPoint: &configuration.EntryPoint{Address: ":80"}, - url: "https://foo:443", - expectedURL: "http://foo:80", - expectedStatus: http.StatusFound, - }, - { - desc: "HTTP to HTTP", - entryPoint: &configuration.EntryPoint{Address: ":88"}, - url: "http://foo:80", - expectedURL: "http://foo:88", - expectedStatus: http.StatusFound, - }, - { - desc: "HTTP to HTTPS permanent", - entryPoint: &configuration.EntryPoint{Address: ":443", TLS: &tls.TLS{}}, - permanent: true, - url: "http://foo:80", - expectedURL: "https://foo:443", - expectedStatus: http.StatusMovedPermanently, - }, - { - desc: "HTTPS to HTTP permanent", - entryPoint: &configuration.EntryPoint{Address: ":80"}, - permanent: true, - url: "https://foo:443", - expectedURL: "http://foo:80", - expectedStatus: http.StatusMovedPermanently, - }, - { - desc: "invalid address", - entryPoint: &configuration.EntryPoint{Address: ":foo", TLS: &tls.TLS{}}, - url: "http://foo:80", - errorExpected: true, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - handler, err := NewEntryPointHandler(test.entryPoint, test.permanent) - - if test.errorExpected { - require.Error(t, err) - } else { - require.NoError(t, err) - - recorder := httptest.NewRecorder() - r := testhelpers.MustNewRequest(http.MethodGet, test.url, nil) - handler.ServeHTTP(recorder, r, nil) - - location, err := recorder.Result().Location() - require.NoError(t, err) - - assert.Equal(t, test.expectedURL, location.String()) - assert.Equal(t, test.expectedStatus, recorder.Code) - } - }) - } -} - func TestNewRegexHandler(t *testing.T) { testCases := []struct { desc string - regex string - replacement string - 
permanent bool + config config.Redirect url string expectedURL string expectedStatus int errorExpected bool + secured bool }{ { - desc: "simple redirection", - regex: `^(?:http?:\/\/)(foo)(\.com)(:\d+)(.*)$`, - replacement: "https://${1}bar$2:443$4", + desc: "simple redirection", + config: config.Redirect{ + Regex: `^(?:http?:\/\/)(foo)(\.com)(:\d+)(.*)$`, + Replacement: "https://${1}bar$2:443$4", + }, url: "http://foo.com:80", expectedURL: "https://foobar.com:443", expectedStatus: http.StatusFound, }, { - desc: "use request header", - regex: `^(?:http?:\/\/)(foo)(\.com)(:\d+)(.*)$`, - replacement: `https://${1}{{ .Request.Header.Get "X-Foo" }}$2:443$4`, + desc: "use request header", + config: config.Redirect{ + Regex: `^(?:http?:\/\/)(foo)(\.com)(:\d+)(.*)$`, + Replacement: `https://${1}{{ .Request.Header.Get "X-Foo" }}$2:443$4`, + }, url: "http://foo.com:80", expectedURL: "https://foobar.com:443", expectedStatus: http.StatusFound, }, { - desc: "URL doesn't match regex", - regex: `^(?:http?:\/\/)(foo)(\.com)(:\d+)(.*)$`, - replacement: "https://${1}bar$2:443$4", + desc: "URL doesn't match regex", + config: config.Redirect{ + Regex: `^(?:http?:\/\/)(foo)(\.com)(:\d+)(.*)$`, + Replacement: "https://${1}bar$2:443$4", + }, url: "http://bar.com:80", expectedStatus: http.StatusOK, }, { - desc: "invalid rewritten URL", - regex: `^(.*)$`, - replacement: "http://192.168.0.%31/", + desc: "invalid rewritten URL", + config: config.Redirect{ + Regex: `^(.*)$`, + Replacement: "http://192.168.0.%31/", + }, url: "http://foo.com:80", expectedStatus: http.StatusBadGateway, }, { - desc: "invalid regex", - regex: `^(.*`, - replacement: "$1", + desc: "invalid regex", + config: config.Redirect{ + Regex: `^(.*`, + Replacement: "$1", + }, url: "http://foo.com:80", errorExpected: true, }, + { + desc: "HTTP to HTTPS permanent", + config: config.Redirect{ + Regex: `^http://`, + Replacement: "https://$1", + Permanent: true, + }, + url: "http://foo", + expectedURL: "https://foo", + expectedStatus: http.StatusMovedPermanently, + }, + { + desc: "HTTPS to HTTP permanent", + config: config.Redirect{ + Regex: `https://foo`, + Replacement: "http://foo", + Permanent: true, + }, + secured: true, + url: "https://foo", + expectedURL: "http://foo", + expectedStatus: http.StatusMovedPermanently, + }, + { + desc: "HTTP to HTTPS", + config: config.Redirect{ + Regex: `http://foo:80`, + Replacement: "https://foo:443", + }, + url: "http://foo:80", + expectedURL: "https://foo:443", + expectedStatus: http.StatusFound, + }, + { + desc: "HTTPS to HTTP", + config: config.Redirect{ + Regex: `https://foo:443`, + Replacement: "http://foo:80", + }, + secured: true, + url: "https://foo:443", + expectedURL: "http://foo:80", + expectedStatus: http.StatusFound, + }, + { + desc: "HTTP to HTTP", + config: config.Redirect{ + Regex: `http://foo:80`, + Replacement: "http://foo:88", + }, + url: "http://foo:80", + expectedURL: "http://foo:88", + expectedStatus: http.StatusFound, + }, } for _, test := range testCases { @@ -148,20 +131,23 @@ func TestNewRegexHandler(t *testing.T) { t.Run(test.desc, func(t *testing.T) { t.Parallel() - handler, err := NewRegexHandler(test.regex, test.replacement, test.permanent) + next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}) + handler, err := New(context.Background(), next, test.config, "traefikTest") if test.errorExpected { - require.Nil(t, handler) require.Error(t, err) + require.Nil(t, handler) } else { - require.NotNil(t, handler) require.NoError(t, err) + require.NotNil(t, handler) recorder 
:= httptest.NewRecorder() r := testhelpers.MustNewRequest(http.MethodGet, test.url, nil) + if test.secured { + r.TLS = &tls.ConnectionState{} + } r.Header.Set("X-Foo", "bar") - next := func(rw http.ResponseWriter, req *http.Request) {} - handler.ServeHTTP(recorder, r, next) + handler.ServeHTTP(recorder, r) if test.expectedStatus == http.StatusMovedPermanently || test.expectedStatus == http.StatusFound { assert.Equal(t, test.expectedStatus, recorder.Code) diff --git a/middlewares/replacepath/replace_path.go b/middlewares/replacepath/replace_path.go new file mode 100644 index 000000000..a9c969c21 --- /dev/null +++ b/middlewares/replacepath/replace_path.go @@ -0,0 +1,46 @@ +package replacepath + +import ( + "context" + "net/http" + + "github.com/containous/traefik/config" + "github.com/containous/traefik/middlewares" + "github.com/containous/traefik/tracing" + "github.com/opentracing/opentracing-go/ext" +) + +const ( + // ReplacedPathHeader is the default header to set the old path to. + ReplacedPathHeader = "X-Replaced-Path" + typeName = "ReplacePath" +) + +// ReplacePath is a middleware used to replace the path of a URL request. +type replacePath struct { + next http.Handler + path string + name string +} + +// New creates a new replace path middleware. +func New(ctx context.Context, next http.Handler, config config.ReplacePath, name string) (http.Handler, error) { + middlewares.GetLogger(ctx, name, typeName).Debug("Creating middleware") + + return &replacePath{ + next: next, + path: config.Path, + name: name, + }, nil +} + +func (r *replacePath) GetTracingInformation() (string, ext.SpanKindEnum) { + return r.name, tracing.SpanKindNoneEnum +} + +func (r *replacePath) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + req.Header.Add(ReplacedPathHeader, req.URL.Path) + req.URL.Path = r.path + req.RequestURI = req.URL.RequestURI() + r.next.ServeHTTP(rw, req) +} diff --git a/middlewares/replacepath/replace_path_test.go b/middlewares/replacepath/replace_path_test.go new file mode 100644 index 000000000..63fd7cc9e --- /dev/null +++ b/middlewares/replacepath/replace_path_test.go @@ -0,0 +1,46 @@ +package replacepath + +import ( + "context" + "net/http" + "testing" + + "github.com/containous/traefik/config" + "github.com/containous/traefik/testhelpers" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestReplacePath(t *testing.T) { + var replacementConfig = config.ReplacePath{ + Path: "/replacement-path", + } + + paths := []string{ + "/example", + "/some/really/long/path", + } + + for _, path := range paths { + t.Run(path, func(t *testing.T) { + + var expectedPath, actualHeader, requestURI string + next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + expectedPath = r.URL.Path + actualHeader = r.Header.Get(ReplacedPathHeader) + requestURI = r.RequestURI + }) + + handler, err := New(context.Background(), next, replacementConfig, "foo-replace-path") + require.NoError(t, err) + + req := testhelpers.MustNewRequest(http.MethodGet, "http://localhost"+path, nil) + + handler.ServeHTTP(nil, req) + + assert.Equal(t, expectedPath, replacementConfig.Path, "Unexpected path.") + assert.Equal(t, path, actualHeader, "Unexpected '%s' header.", ReplacedPathHeader) + assert.Equal(t, expectedPath, requestURI, "Unexpected request URI.") + }) + } +} diff --git a/middlewares/replacepathregex/replace_path_regex.go b/middlewares/replacepathregex/replace_path_regex.go new file mode 100644 index 000000000..61b921b1e --- /dev/null +++ 
b/middlewares/replacepathregex/replace_path_regex.go @@ -0,0 +1,57 @@ +package replacepathregex + +import ( + "context" + "fmt" + "net/http" + "regexp" + "strings" + + "github.com/containous/traefik/config" + "github.com/containous/traefik/middlewares" + "github.com/containous/traefik/middlewares/replacepath" + "github.com/containous/traefik/tracing" + "github.com/opentracing/opentracing-go/ext" +) + +const ( + typeName = "ReplacePathRegex" +) + +// ReplacePathRegex is a middleware used to replace the path of a URL request with a regular expression. +type replacePathRegex struct { + next http.Handler + regexp *regexp.Regexp + replacement string + name string +} + +// New creates a new replace path regex middleware. +func New(ctx context.Context, next http.Handler, config config.ReplacePathRegex, name string) (http.Handler, error) { + middlewares.GetLogger(ctx, name, typeName).Debug("Creating middleware") + + exp, err := regexp.Compile(strings.TrimSpace(config.Regex)) + if err != nil { + return nil, fmt.Errorf("error compiling regular expression %s: %s", config.Regex, err) + } + + return &replacePathRegex{ + regexp: exp, + replacement: strings.TrimSpace(config.Replacement), + next: next, + name: name, + }, nil +} + +func (rp *replacePathRegex) GetTracingInformation() (string, ext.SpanKindEnum) { + return rp.name, tracing.SpanKindNoneEnum +} + +func (rp *replacePathRegex) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + if rp.regexp != nil && len(rp.replacement) > 0 && rp.regexp.MatchString(req.URL.Path) { + req.Header.Add(replacepath.ReplacedPathHeader, req.URL.Path) + req.URL.Path = rp.regexp.ReplaceAllString(req.URL.Path, rp.replacement) + req.RequestURI = req.URL.RequestURI() + } + rp.next.ServeHTTP(rw, req) +} diff --git a/middlewares/replacepathregex/replace_path_regex_test.go b/middlewares/replacepathregex/replace_path_regex_test.go new file mode 100644 index 000000000..74d6fafe1 --- /dev/null +++ b/middlewares/replacepathregex/replace_path_regex_test.go @@ -0,0 +1,104 @@ +package replacepathregex + +import ( + "context" + "net/http" + "testing" + + "github.com/containous/traefik/config" + "github.com/containous/traefik/middlewares/replacepath" + "github.com/containous/traefik/testhelpers" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestReplacePathRegex(t *testing.T) { + testCases := []struct { + desc string + path string + config config.ReplacePathRegex + expectedPath string + expectedHeader string + expectsError bool + }{ + { + desc: "simple regex", + path: "/whoami/and/whoami", + config: config.ReplacePathRegex{ + Replacement: "/who-am-i/$1", + Regex: `^/whoami/(.*)`, + }, + expectedPath: "/who-am-i/and/whoami", + expectedHeader: "/whoami/and/whoami", + }, + { + desc: "simple replace (no regex)", + path: "/whoami/and/whoami", + config: config.ReplacePathRegex{ + Replacement: "/who-am-i", + Regex: `/whoami`, + }, + expectedPath: "/who-am-i/and/who-am-i", + expectedHeader: "/whoami/and/whoami", + }, + { + desc: "no match", + path: "/whoami/and/whoami", + config: config.ReplacePathRegex{ + Replacement: "/whoami", + Regex: `/no-match`, + }, + expectedPath: "/whoami/and/whoami", + }, + { + desc: "multiple replacement", + path: "/downloads/src/source.go", + config: config.ReplacePathRegex{ + Replacement: "/downloads/$1-$2", + Regex: `^(?i)/downloads/([^/]+)/([^/]+)$`, + }, + expectedPath: "/downloads/src-source.go", + expectedHeader: "/downloads/src/source.go", + }, + { + desc: "invalid regular expression", + path: 
"/invalid/regexp/test", + config: config.ReplacePathRegex{ + Replacement: "/valid/regexp/$1", + Regex: `^(?err)/invalid/regexp/([^/]+)$`, + }, + expectedPath: "/invalid/regexp/test", + expectsError: true, + }, + } + + for _, test := range testCases { + t.Run(test.desc, func(t *testing.T) { + + var actualPath, actualHeader, requestURI string + next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + actualPath = r.URL.Path + actualHeader = r.Header.Get(replacepath.ReplacedPathHeader) + requestURI = r.RequestURI + }) + + handler, err := New(context.Background(), next, test.config, "foo-replace-path-regexp") + if test.expectsError { + require.Error(t, err) + } else { + require.NoError(t, err) + + req := testhelpers.MustNewRequest(http.MethodGet, "http://localhost"+test.path, nil) + req.RequestURI = test.path + + handler.ServeHTTP(nil, req) + + assert.Equal(t, test.expectedPath, actualPath, "Unexpected path.") + assert.Equal(t, actualPath, requestURI, "Unexpected request URI.") + if test.expectedHeader != "" { + assert.Equal(t, test.expectedHeader, actualHeader, "Unexpected '%s' header.", replacepath.ReplacedPathHeader) + } + } + }) + } +} diff --git a/middlewares/requestdecorator/hostresolver.go b/middlewares/requestdecorator/hostresolver.go new file mode 100644 index 000000000..551643deb --- /dev/null +++ b/middlewares/requestdecorator/hostresolver.go @@ -0,0 +1,124 @@ +package requestdecorator + +import ( + "context" + "fmt" + "net" + "sort" + "strings" + "time" + + "github.com/containous/traefik/log" + "github.com/miekg/dns" + "github.com/patrickmn/go-cache" +) + +type cnameResolv struct { + TTL time.Duration + Record string +} + +type byTTL []*cnameResolv + +func (a byTTL) Len() int { return len(a) } +func (a byTTL) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byTTL) Less(i, j int) bool { return a[i].TTL > a[j].TTL } + +// Resolver used for host resolver. +type Resolver struct { + CnameFlattening bool + ResolvConfig string + ResolvDepth int + cache *cache.Cache +} + +// CNAMEFlatten check if CNAME record exists, flatten if possible. +func (hr *Resolver) CNAMEFlatten(ctx context.Context, host string) string { + if hr.cache == nil { + hr.cache = cache.New(30*time.Minute, 5*time.Minute) + } + + result := host + request := host + + value, found := hr.cache.Get(host) + if found { + return value.(string) + } + + logger := log.FromContext(ctx) + var cacheDuration = 0 * time.Second + for depth := 0; depth < hr.ResolvDepth; depth++ { + resolv, err := cnameResolve(ctx, request, hr.ResolvConfig) + if err != nil { + logger.Error(err) + break + } + if resolv == nil { + break + } + + result = resolv.Record + if depth == 0 { + cacheDuration = resolv.TTL + } + request = resolv.Record + } + + if err := hr.cache.Add(host, result, cacheDuration); err != nil { + logger.Error(err) + } + + return result +} + +// cnameResolve resolves CNAME if exists, and return with the highest TTL. 
+func cnameResolve(ctx context.Context, host string, resolvPath string) (*cnameResolv, error) { + config, err := dns.ClientConfigFromFile(resolvPath) + if err != nil { + return nil, fmt.Errorf("invalid resolver configuration file: %s", resolvPath) + } + + client := &dns.Client{Timeout: 30 * time.Second} + + m := &dns.Msg{} + m.SetQuestion(dns.Fqdn(host), dns.TypeCNAME) + + var result []*cnameResolv + for _, server := range config.Servers { + tempRecord, err := getRecord(client, m, server, config.Port) + if err != nil { + log.FromContext(ctx).Errorf("Failed to resolve host %s: %v", host, err) + continue + } + result = append(result, tempRecord) + } + + if len(result) <= 0 { + return nil, nil + } + + sort.Sort(byTTL(result)) + return result[0], nil +} + +func getRecord(client *dns.Client, msg *dns.Msg, server string, port string) (*cnameResolv, error) { + resp, _, err := client.Exchange(msg, net.JoinHostPort(server, port)) + if err != nil { + return nil, fmt.Errorf("exchange error for server %s: %v", server, err) + } + + if resp == nil || len(resp.Answer) == 0 { + return nil, fmt.Errorf("empty answer for server %s", server) + } + + rr, ok := resp.Answer[0].(*dns.CNAME) + if !ok { + return nil, fmt.Errorf("invalid response type for server %s", server) + } + + return &cnameResolv{ + TTL: time.Duration(rr.Hdr.Ttl) * time.Second, + Record: strings.TrimSuffix(rr.Target, "."), + }, nil +} diff --git a/middlewares/requestdecorator/hostresolver_test.go b/middlewares/requestdecorator/hostresolver_test.go new file mode 100644 index 000000000..ed199c387 --- /dev/null +++ b/middlewares/requestdecorator/hostresolver_test.go @@ -0,0 +1,51 @@ +package requestdecorator + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestCNAMEFlatten(t *testing.T) { + testCases := []struct { + desc string + resolvFile string + domain string + expectedDomain string + }{ + { + desc: "host request is CNAME record", + resolvFile: "/etc/resolv.conf", + domain: "www.github.com", + expectedDomain: "github.com", + }, + { + desc: "resolve file not found", + resolvFile: "/etc/resolv.oops", + domain: "www.github.com", + expectedDomain: "www.github.com", + }, + { + desc: "host request is not CNAME record", + resolvFile: "/etc/resolv.conf", + domain: "github.com", + expectedDomain: "github.com", + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + hostResolver := &Resolver{ + ResolvConfig: test.resolvFile, + ResolvDepth: 5, + } + + flatH := hostResolver.CNAMEFlatten(context.Background(), test.domain) + assert.Equal(t, test.expectedDomain, flatH) + }) + } +} diff --git a/middlewares/requestdecorator/request_decorator.go b/middlewares/requestdecorator/request_decorator.go new file mode 100644 index 000000000..3672ad5cc --- /dev/null +++ b/middlewares/requestdecorator/request_decorator.go @@ -0,0 +1,88 @@ +package requestdecorator + +import ( + "context" + "net" + "net/http" + "strings" + + "github.com/containous/alice" + "github.com/containous/traefik/config/static" + "github.com/containous/traefik/old/types" +) + +const ( + canonicalKey key = "canonical" + flattenKey key = "flatten" +) + +type key string + +// RequestDecorator is the struct for the middleware that adds the CanonicalDomain of the request Host into a context for later use. +type RequestDecorator struct { + hostResolver *Resolver +} + +// New creates a new request host middleware. 
+func New(hostResolverConfig *static.HostResolverConfig) *RequestDecorator { + requestDecorator := &RequestDecorator{} + if hostResolverConfig != nil { + requestDecorator.hostResolver = &Resolver{ + CnameFlattening: hostResolverConfig.CnameFlattening, + ResolvConfig: hostResolverConfig.ResolvConfig, + ResolvDepth: hostResolverConfig.ResolvDepth, + } + } + return requestDecorator +} + +func (r *RequestDecorator) ServeHTTP(rw http.ResponseWriter, req *http.Request, next http.HandlerFunc) { + host := types.CanonicalDomain(parseHost(req.Host)) + reqt := req.WithContext(context.WithValue(req.Context(), canonicalKey, host)) + + if r.hostResolver != nil && r.hostResolver.CnameFlattening { + flatHost := r.hostResolver.CNAMEFlatten(reqt.Context(), host) + reqt = reqt.WithContext(context.WithValue(reqt.Context(), flattenKey, flatHost)) + } + + next(rw, reqt) +} + +func parseHost(addr string) string { + if !strings.Contains(addr, ":") { + return addr + } + + host, _, err := net.SplitHostPort(addr) + if err != nil { + return addr + } + return host +} + +// GetCanonizedHost retrieves the canonized host from the given context (previously stored in the request context by the middleware). +func GetCanonizedHost(ctx context.Context) string { + if val, ok := ctx.Value(canonicalKey).(string); ok { + return val + } + + return "" +} + +// GetCNAMEFlatten return the flat name if it is present in the context. +func GetCNAMEFlatten(ctx context.Context) string { + if val, ok := ctx.Value(flattenKey).(string); ok { + return val + } + + return "" +} + +// WrapHandler Wraps a ServeHTTP with next to an alice.Constructor. +func WrapHandler(handler *RequestDecorator) alice.Constructor { + return func(next http.Handler) (http.Handler, error) { + return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + handler.ServeHTTP(rw, req, next.ServeHTTP) + }), nil + } +} diff --git a/middlewares/requestdecorator/request_decorator_test.go b/middlewares/requestdecorator/request_decorator_test.go new file mode 100644 index 000000000..0053390ac --- /dev/null +++ b/middlewares/requestdecorator/request_decorator_test.go @@ -0,0 +1,145 @@ +package requestdecorator + +import ( + "net/http" + "testing" + + "github.com/containous/traefik/config/static" + "github.com/containous/traefik/testhelpers" + "github.com/stretchr/testify/assert" +) + +func TestRequestHost(t *testing.T) { + testCases := []struct { + desc string + url string + expected string + }{ + { + desc: "host without :", + url: "http://host", + expected: "host", + }, + { + desc: "host with : and without port", + url: "http://host:", + expected: "host", + }, + { + desc: "IP host with : and with port", + url: "http://127.0.0.1:123", + expected: "127.0.0.1", + }, + { + desc: "IP host with : and without port", + url: "http://127.0.0.1:", + expected: "127.0.0.1", + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + next := http.HandlerFunc(func(_ http.ResponseWriter, r *http.Request) { + host := GetCanonizedHost(r.Context()) + assert.Equal(t, test.expected, host) + }) + + rh := New(nil) + + req := testhelpers.MustNewRequest(http.MethodGet, test.url, nil) + + rh.ServeHTTP(nil, req, next) + }) + } +} + +func TestRequestFlattening(t *testing.T) { + testCases := []struct { + desc string + url string + expected string + }{ + { + desc: "host with flattening", + url: "http://www.github.com", + expected: "github.com", + }, + { + desc: "host without flattening", + url: "http://github.com", + expected: 
"github.com", + }, + { + desc: "ip without flattening", + url: "http://127.0.0.1", + expected: "127.0.0.1", + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + next := http.HandlerFunc(func(_ http.ResponseWriter, r *http.Request) { + host := GetCNAMEFlatten(r.Context()) + assert.Equal(t, test.expected, host) + }) + + rh := New( + &static.HostResolverConfig{ + CnameFlattening: true, + ResolvConfig: "/etc/resolv.conf", + ResolvDepth: 5, + }, + ) + + req := testhelpers.MustNewRequest(http.MethodGet, test.url, nil) + + rh.ServeHTTP(nil, req, next) + }) + } +} + +func TestRequestHostParseHost(t *testing.T) { + testCases := []struct { + desc string + host string + expected string + }{ + { + desc: "host without :", + host: "host", + expected: "host", + }, + { + desc: "host with : and without port", + host: "host:", + expected: "host", + }, + { + desc: "IP host with : and with port", + host: "127.0.0.1:123", + expected: "127.0.0.1", + }, + { + desc: "IP host with : and without port", + host: "127.0.0.1:", + expected: "127.0.0.1", + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + actual := parseHost(test.host) + + assert.Equal(t, test.expected, actual) + }) + } +} diff --git a/middlewares/retry/retry.go b/middlewares/retry/retry.go new file mode 100644 index 000000000..dd23b185b --- /dev/null +++ b/middlewares/retry/retry.go @@ -0,0 +1,194 @@ +package retry + +import ( + "bufio" + "context" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/http/httptrace" + + "github.com/containous/traefik/config" + "github.com/containous/traefik/middlewares" + "github.com/containous/traefik/tracing" + "github.com/opentracing/opentracing-go/ext" +) + +// Compile time validation that the response writer implements http interfaces correctly. +var _ middlewares.Stateful = &responseWriterWithCloseNotify{} + +const ( + typeName = "Retry" +) + +// Listener is used to inform about retry attempts. +type Listener interface { + // Retried will be called when a retry happens, with the request attempt passed to it. + // For the first retry this will be attempt 2. + Retried(req *http.Request, attempt int) +} + +// Listeners is a convenience type to construct a list of Listener and notify +// each of them about a retry attempt. +type Listeners []Listener + +// retry is a middleware that retries requests. +type retry struct { + attempts int + next http.Handler + listener Listener + name string +} + +// New returns a new retry middleware. 
+func New(ctx context.Context, next http.Handler, config config.Retry, listener Listener, name string) (http.Handler, error) { + logger := middlewares.GetLogger(ctx, name, typeName) + logger.Debug("Creating middleware") + + if config.Attempts <= 0 { + return nil, fmt.Errorf("incorrect (or empty) value for attempt (%d)", config.Attempts) + } + + return &retry{ + attempts: config.Attempts, + next: next, + listener: listener, + name: name, + }, nil +} + +func (r *retry) GetTracingInformation() (string, ext.SpanKindEnum) { + return r.name, tracing.SpanKindNoneEnum +} + +func (r *retry) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + // if we might make multiple attempts, swap the body for an ioutil.NopCloser + // cf https://github.com/containous/traefik/issues/1008 + if r.attempts > 1 { + body := req.Body + defer body.Close() + req.Body = ioutil.NopCloser(body) + } + + attempts := 1 + for { + attemptsExhausted := attempts >= r.attempts + shouldRetry := !attemptsExhausted + retryResponseWriter := newResponseWriter(rw, shouldRetry) + + // Disable retries when the backend already received request data + trace := &httptrace.ClientTrace{ + WroteHeaders: func() { + retryResponseWriter.DisableRetries() + }, + WroteRequest: func(httptrace.WroteRequestInfo) { + retryResponseWriter.DisableRetries() + }, + } + newCtx := httptrace.WithClientTrace(req.Context(), trace) + + r.next.ServeHTTP(retryResponseWriter, req.WithContext(newCtx)) + + if !retryResponseWriter.ShouldRetry() { + break + } + + attempts++ + logger := middlewares.GetLogger(req.Context(), r.name, typeName) + logger.Debugf("New attempt %d for request: %v", attempts, req.URL) + r.listener.Retried(req, attempts) + } +} + +// Retried exists to implement the Listener interface. It calls Retried on each of its slice entries. +func (l Listeners) Retried(req *http.Request, attempt int) { + for _, listener := range l { + listener.Retried(req, attempt) + } +} + +type responseWriter interface { + http.ResponseWriter + http.Flusher + ShouldRetry() bool + DisableRetries() +} + +func newResponseWriter(rw http.ResponseWriter, shouldRetry bool) responseWriter { + responseWriter := &responseWriterWithoutCloseNotify{ + responseWriter: rw, + shouldRetry: shouldRetry, + } + if _, ok := rw.(http.CloseNotifier); ok { + return &responseWriterWithCloseNotify{ + responseWriterWithoutCloseNotify: responseWriter, + } + } + return responseWriter +} + +type responseWriterWithoutCloseNotify struct { + responseWriter http.ResponseWriter + shouldRetry bool +} + +func (r *responseWriterWithoutCloseNotify) ShouldRetry() bool { + return r.shouldRetry +} + +func (r *responseWriterWithoutCloseNotify) DisableRetries() { + r.shouldRetry = false +} + +func (r *responseWriterWithoutCloseNotify) Header() http.Header { + if r.ShouldRetry() { + return make(http.Header) + } + return r.responseWriter.Header() +} + +func (r *responseWriterWithoutCloseNotify) Write(buf []byte) (int, error) { + if r.ShouldRetry() { + return len(buf), nil + } + return r.responseWriter.Write(buf) +} + +func (r *responseWriterWithoutCloseNotify) WriteHeader(code int) { + if r.ShouldRetry() && code == http.StatusServiceUnavailable { + // We get a 503 HTTP Status Code when there is no backend server in the pool + // to which the request could be sent. Also, note that r.ShouldRetry() + // will never return true in case there was a connection established to + // the backend server and so we can be sure that the 503 was produced + // inside Traefik already and we don't have to retry in this cases. 
+ r.DisableRetries() + } + + if r.ShouldRetry() { + return + } + r.responseWriter.WriteHeader(code) +} + +func (r *responseWriterWithoutCloseNotify) Hijack() (net.Conn, *bufio.ReadWriter, error) { + hijacker, ok := r.responseWriter.(http.Hijacker) + if !ok { + return nil, nil, fmt.Errorf("%T is not a http.Hijacker", r.responseWriter) + } + return hijacker.Hijack() +} + +func (r *responseWriterWithoutCloseNotify) Flush() { + if flusher, ok := r.responseWriter.(http.Flusher); ok { + flusher.Flush() + } +} + +type responseWriterWithCloseNotify struct { + *responseWriterWithoutCloseNotify +} + +func (r *responseWriterWithCloseNotify) CloseNotify() <-chan bool { + return r.responseWriter.(http.CloseNotifier).CloseNotify() +} diff --git a/middlewares/retry/retry_test.go b/middlewares/retry/retry_test.go new file mode 100644 index 000000000..7efba979e --- /dev/null +++ b/middlewares/retry/retry_test.go @@ -0,0 +1,263 @@ +package retry + +import ( + "context" + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/containous/traefik/config" + "github.com/containous/traefik/middlewares/emptybackendhandler" + "github.com/containous/traefik/testhelpers" + "github.com/gorilla/websocket" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/vulcand/oxy/forward" + "github.com/vulcand/oxy/roundrobin" +) + +func TestRetry(t *testing.T) { + testCases := []struct { + desc string + config config.Retry + wantRetryAttempts int + wantResponseStatus int + amountFaultyEndpoints int + }{ + { + desc: "no retry on success", + config: config.Retry{Attempts: 1}, + wantRetryAttempts: 0, + wantResponseStatus: http.StatusOK, + amountFaultyEndpoints: 0, + }, + { + desc: "no retry when max request attempts is one", + config: config.Retry{Attempts: 1}, + wantRetryAttempts: 0, + wantResponseStatus: http.StatusInternalServerError, + amountFaultyEndpoints: 1, + }, + { + desc: "one retry when one server is faulty", + config: config.Retry{Attempts: 2}, + wantRetryAttempts: 1, + wantResponseStatus: http.StatusOK, + amountFaultyEndpoints: 1, + }, + { + desc: "two retries when two servers are faulty", + config: config.Retry{Attempts: 3}, + wantRetryAttempts: 2, + wantResponseStatus: http.StatusOK, + amountFaultyEndpoints: 2, + }, + { + desc: "max attempts exhausted delivers the 5xx response", + config: config.Retry{Attempts: 3}, + wantRetryAttempts: 2, + wantResponseStatus: http.StatusInternalServerError, + amountFaultyEndpoints: 3, + }, + } + + backendServer := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + rw.WriteHeader(http.StatusOK) + _, err := rw.Write([]byte("OK")) + if err != nil { + http.Error(rw, err.Error(), http.StatusInternalServerError) + } + })) + + forwarder, err := forward.New() + require.NoError(t, err) + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + loadBalancer, err := roundrobin.New(forwarder) + require.NoError(t, err) + + basePort := 33444 + for i := 0; i < test.amountFaultyEndpoints; i++ { + // 192.0.2.0 is a non-routable IP for testing purposes. + // See: https://stackoverflow.com/questions/528538/non-routable-ip-address/18436928#18436928 + // We only use the port specification here because the URL is used as identifier + // in the load balancer and using the exact same URL would not add a new server. 
+ err = loadBalancer.UpsertServer(testhelpers.MustParseURL("http://192.0.2.0:" + string(basePort+i))) + require.NoError(t, err) + } + + // add the functioning server to the end of the load balancer list + err = loadBalancer.UpsertServer(testhelpers.MustParseURL(backendServer.URL)) + require.NoError(t, err) + + retryListener := &countingRetryListener{} + retry, err := New(context.Background(), loadBalancer, test.config, retryListener, "traefikTest") + require.NoError(t, err) + + recorder := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, "http://localhost:3000/ok", nil) + + retry.ServeHTTP(recorder, req) + + assert.Equal(t, test.wantResponseStatus, recorder.Code) + assert.Equal(t, test.wantRetryAttempts, retryListener.timesCalled) + }) + } +} + +func TestRetryEmptyServerList(t *testing.T) { + forwarder, err := forward.New() + require.NoError(t, err) + + loadBalancer, err := roundrobin.New(forwarder) + require.NoError(t, err) + + // The EmptyBackend middleware ensures that there is a 503 + // response status set when there is no backend server in the pool. + next := emptybackendhandler.New(loadBalancer) + + retryListener := &countingRetryListener{} + retry, err := New(context.Background(), next, config.Retry{Attempts: 3}, retryListener, "traefikTest") + require.NoError(t, err) + + recorder := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, "http://localhost:3000/ok", nil) + + retry.ServeHTTP(recorder, req) + + assert.Equal(t, http.StatusServiceUnavailable, recorder.Code) + assert.Equal(t, 0, retryListener.timesCalled) +} + +func TestRetryListeners(t *testing.T) { + req := httptest.NewRequest(http.MethodGet, "/", nil) + retryListeners := Listeners{&countingRetryListener{}, &countingRetryListener{}} + + retryListeners.Retried(req, 1) + retryListeners.Retried(req, 1) + + for _, retryListener := range retryListeners { + listener := retryListener.(*countingRetryListener) + if listener.timesCalled != 2 { + t.Errorf("retry listener was called %d time(s), want %d time(s)", listener.timesCalled, 2) + } + } +} + +// countingRetryListener is a Listener implementation to count the times the Retried fn is called. 
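The Listener interface from retry.go admits implementations other than the test counter declared next; a hedged sketch of a logging listener, where the log format is purely illustrative.

package example

import (
	"log"
	"net/http"
)

// loggingRetryListener is an assumed sketch of a retry.Listener implementation;
// the Retried(req, attempt) signature is the one declared in retry.go above.
type loggingRetryListener struct{}

func (loggingRetryListener) Retried(req *http.Request, attempt int) {
	log.Printf("retrying %s %s (attempt %d)", req.Method, req.URL, attempt)
}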
+type countingRetryListener struct { + timesCalled int +} + +func (l *countingRetryListener) Retried(req *http.Request, attempt int) { + l.timesCalled++ +} + +func TestRetryWithFlush(t *testing.T) { + next := http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + rw.WriteHeader(200) + _, err := rw.Write([]byte("FULL ")) + if err != nil { + http.Error(rw, err.Error(), http.StatusInternalServerError) + } + rw.(http.Flusher).Flush() + _, err = rw.Write([]byte("DATA")) + if err != nil { + http.Error(rw, err.Error(), http.StatusInternalServerError) + } + }) + + retry, err := New(context.Background(), next, config.Retry{Attempts: 1}, &countingRetryListener{}, "traefikTest") + require.NoError(t, err) + + responseRecorder := httptest.NewRecorder() + + retry.ServeHTTP(responseRecorder, &http.Request{}) + + assert.Equal(t, "FULL DATA", responseRecorder.Body.String()) +} + +func TestRetryWebsocket(t *testing.T) { + testCases := []struct { + desc string + maxRequestAttempts int + expectedRetryAttempts int + expectedResponseStatus int + expectedError bool + amountFaultyEndpoints int + }{ + { + desc: "Switching ok after 2 retries", + maxRequestAttempts: 3, + expectedRetryAttempts: 2, + amountFaultyEndpoints: 2, + expectedResponseStatus: http.StatusSwitchingProtocols, + }, + { + desc: "Switching failed", + maxRequestAttempts: 2, + expectedRetryAttempts: 1, + amountFaultyEndpoints: 2, + expectedResponseStatus: http.StatusBadGateway, + expectedError: true, + }, + } + + forwarder, err := forward.New() + if err != nil { + t.Fatalf("Error creating forwarder: %s", err) + } + + backendServer := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + upgrader := websocket.Upgrader{} + _, err := upgrader.Upgrade(rw, req, nil) + if err != nil { + http.Error(rw, err.Error(), http.StatusInternalServerError) + } + })) + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + loadBalancer, err := roundrobin.New(forwarder) + if err != nil { + t.Fatalf("Error creating load balancer: %s", err) + } + + basePort := 33444 + for i := 0; i < test.amountFaultyEndpoints; i++ { + // 192.0.2.0 is a non-routable IP for testing purposes. + // See: https://stackoverflow.com/questions/528538/non-routable-ip-address/18436928#18436928 + // We only use the port specification here because the URL is used as identifier + // in the load balancer and using the exact same URL would not add a new server. 
+ _ = loadBalancer.UpsertServer(testhelpers.MustParseURL("http://192.0.2.0:" + string(basePort+i))) + } + + // add the functioning server to the end of the load balancer list + loadBalancer.UpsertServer(testhelpers.MustParseURL(backendServer.URL)) + + retryListener := &countingRetryListener{} + retryH, err := New(context.Background(), loadBalancer, config.Retry{Attempts: test.maxRequestAttempts}, retryListener, "traefikTest") + require.NoError(t, err) + + retryServer := httptest.NewServer(retryH) + + url := strings.Replace(retryServer.URL, "http", "ws", 1) + _, response, err := websocket.DefaultDialer.Dial(url, nil) + + if !test.expectedError { + require.NoError(t, err) + } + + assert.Equal(t, test.expectedResponseStatus, response.StatusCode) + assert.Equal(t, test.expectedRetryAttempts, retryListener.timesCalled) + }) + } +} diff --git a/middlewares/stripprefix/strip_prefix.go b/middlewares/stripprefix/strip_prefix.go new file mode 100644 index 000000000..3ef439b9e --- /dev/null +++ b/middlewares/stripprefix/strip_prefix.go @@ -0,0 +1,67 @@ +package stripprefix + +import ( + "context" + "net/http" + "strings" + + "github.com/containous/traefik/config" + "github.com/containous/traefik/middlewares" + "github.com/containous/traefik/tracing" + "github.com/opentracing/opentracing-go/ext" +) + +const ( + // ForwardedPrefixHeader is the default header to set prefix. + ForwardedPrefixHeader = "X-Forwarded-Prefix" + typeName = "StripPrefix" +) + +// stripPrefix is a middleware used to strip prefix from an URL request. +type stripPrefix struct { + next http.Handler + prefixes []string + name string +} + +// New creates a new strip prefix middleware. +func New(ctx context.Context, next http.Handler, config config.StripPrefix, name string) (http.Handler, error) { + middlewares.GetLogger(ctx, name, typeName).Debug("Creating middleware") + return &stripPrefix{ + prefixes: config.Prefixes, + next: next, + name: name, + }, nil +} + +func (s *stripPrefix) GetTracingInformation() (string, ext.SpanKindEnum) { + return s.name, tracing.SpanKindNoneEnum +} + +func (s *stripPrefix) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + for _, prefix := range s.prefixes { + if strings.HasPrefix(req.URL.Path, prefix) { + req.URL.Path = getPrefixStripped(req.URL.Path, prefix) + if req.URL.RawPath != "" { + req.URL.RawPath = getPrefixStripped(req.URL.RawPath, prefix) + } + s.serveRequest(rw, req, strings.TrimSpace(prefix)) + return + } + } + http.NotFound(rw, req) +} + +func (s *stripPrefix) serveRequest(rw http.ResponseWriter, req *http.Request, prefix string) { + req.Header.Add(ForwardedPrefixHeader, prefix) + req.RequestURI = req.URL.RequestURI() + s.next.ServeHTTP(rw, req) +} + +func getPrefixStripped(s, prefix string) string { + return ensureLeadingSlash(strings.TrimPrefix(s, prefix)) +} + +func ensureLeadingSlash(str string) string { + return "/" + strings.TrimPrefix(str, "/") +} diff --git a/middlewares/stripprefix/strip_prefix_test.go b/middlewares/stripprefix/strip_prefix_test.go new file mode 100644 index 000000000..0c2d8a911 --- /dev/null +++ b/middlewares/stripprefix/strip_prefix_test.go @@ -0,0 +1,169 @@ +package stripprefix + +import ( + "context" + "net/http" + "net/http/httptest" + "testing" + + "github.com/containous/traefik/config" + "github.com/containous/traefik/testhelpers" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestStripPrefix(t *testing.T) { + testCases := []struct { + desc string + config config.StripPrefix + path string + 
expectedStatusCode int + expectedPath string + expectedRawPath string + expectedHeader string + }{ + { + desc: "no prefixes configured", + config: config.StripPrefix{ + Prefixes: []string{}, + }, + path: "/noprefixes", + expectedStatusCode: http.StatusNotFound, + }, + { + desc: "wildcard (.*) requests", + config: config.StripPrefix{ + Prefixes: []string{"/"}, + }, + path: "/", + expectedStatusCode: http.StatusOK, + expectedPath: "/", + expectedHeader: "/", + }, + { + desc: "prefix and path matching", + config: config.StripPrefix{ + Prefixes: []string{"/stat"}, + }, + path: "/stat", + expectedStatusCode: http.StatusOK, + expectedPath: "/", + expectedHeader: "/stat", + }, + { + desc: "path prefix on exactly matching path", + config: config.StripPrefix{ + Prefixes: []string{"/stat/"}, + }, + path: "/stat/", + expectedStatusCode: http.StatusOK, + expectedPath: "/", + expectedHeader: "/stat/", + }, + { + desc: "path prefix on matching longer path", + config: config.StripPrefix{ + Prefixes: []string{"/stat/"}, + }, + path: "/stat/us", + expectedStatusCode: http.StatusOK, + expectedPath: "/us", + expectedHeader: "/stat/", + }, + { + desc: "path prefix on mismatching path", + config: config.StripPrefix{ + Prefixes: []string{"/stat/"}, + }, + path: "/status", + expectedStatusCode: http.StatusNotFound, + }, + { + desc: "general prefix on matching path", + config: config.StripPrefix{ + Prefixes: []string{"/stat"}, + }, + path: "/stat/", + expectedStatusCode: http.StatusOK, + expectedPath: "/", + expectedHeader: "/stat", + }, + { + desc: "earlier prefix matching", + config: config.StripPrefix{ + + Prefixes: []string{"/stat", "/stat/us"}, + }, + path: "/stat/us", + expectedStatusCode: http.StatusOK, + expectedPath: "/us", + expectedHeader: "/stat", + }, + { + desc: "later prefix matching", + config: config.StripPrefix{ + Prefixes: []string{"/mismatch", "/stat"}, + }, + path: "/stat", + expectedStatusCode: http.StatusOK, + expectedPath: "/", + expectedHeader: "/stat", + }, + { + desc: "prefix matching within slash boundaries", + config: config.StripPrefix{ + Prefixes: []string{"/stat"}, + }, + path: "/status", + expectedStatusCode: http.StatusOK, + expectedPath: "/us", + expectedHeader: "/stat", + }, + { + desc: "raw path is also stripped", + config: config.StripPrefix{ + Prefixes: []string{"/stat"}, + }, + path: "/stat/a%2Fb", + expectedStatusCode: http.StatusOK, + expectedPath: "/a/b", + expectedRawPath: "/a%2Fb", + expectedHeader: "/stat", + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + var actualPath, actualRawPath, actualHeader, requestURI string + next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + actualPath = r.URL.Path + actualRawPath = r.URL.RawPath + actualHeader = r.Header.Get(ForwardedPrefixHeader) + requestURI = r.RequestURI + }) + + handler, err := New(context.Background(), next, test.config, "foo-strip-prefix") + require.NoError(t, err) + + req := testhelpers.MustNewRequest(http.MethodGet, "http://localhost"+test.path, nil) + resp := &httptest.ResponseRecorder{Code: http.StatusOK} + + handler.ServeHTTP(resp, req) + + assert.Equal(t, test.expectedStatusCode, resp.Code, "Unexpected status code.") + assert.Equal(t, test.expectedPath, actualPath, "Unexpected path.") + assert.Equal(t, test.expectedRawPath, actualRawPath, "Unexpected raw path.") + assert.Equal(t, test.expectedHeader, actualHeader, "Unexpected '%s' header.", ForwardedPrefixHeader) + + expectedURI := test.expectedPath + if 
test.expectedRawPath != "" { + // go HTTP uses the raw path when existent in the RequestURI + expectedURI = test.expectedRawPath + } + assert.Equal(t, expectedURI, requestURI, "Unexpected request URI.") + }) + } +} diff --git a/middlewares/stripprefixregex/strip_prefix_regex.go b/middlewares/stripprefixregex/strip_prefix_regex.go new file mode 100644 index 000000000..0fcfb15b6 --- /dev/null +++ b/middlewares/stripprefixregex/strip_prefix_regex.go @@ -0,0 +1,79 @@ +package stripprefixregex + +import ( + "context" + "net/http" + "strings" + + "github.com/containous/mux" + "github.com/containous/traefik/config" + "github.com/containous/traefik/middlewares" + "github.com/containous/traefik/middlewares/stripprefix" + "github.com/containous/traefik/tracing" + "github.com/opentracing/opentracing-go/ext" +) + +const ( + typeName = "StripPrefixRegex" +) + +// StripPrefixRegex is a middleware used to strip prefix from an URL request. +type stripPrefixRegex struct { + next http.Handler + router *mux.Router + name string +} + +// New builds a new StripPrefixRegex middleware. +func New(ctx context.Context, next http.Handler, config config.StripPrefixRegex, name string) (http.Handler, error) { + middlewares.GetLogger(ctx, name, typeName).Debug("Creating middleware") + + stripPrefix := stripPrefixRegex{ + next: next, + router: mux.NewRouter(), + name: name, + } + + for _, prefix := range config.Regex { + stripPrefix.router.PathPrefix(prefix) + } + + return &stripPrefix, nil +} + +func (s *stripPrefixRegex) GetTracingInformation() (string, ext.SpanKindEnum) { + return s.name, tracing.SpanKindNoneEnum +} + +func (s *stripPrefixRegex) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + var match mux.RouteMatch + if s.router.Match(req, &match) { + params := make([]string, 0, len(match.Vars)*2) + for key, val := range match.Vars { + params = append(params, key) + params = append(params, val) + } + + prefix, err := match.Route.URL(params...) 
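// match.Route.URL rebuilds the matched prefix from the route template and the
// captured variables; its length tells us how many leading characters to strip.
// If the rebuild fails, or the rebuilt prefix is longer than the request path,
// the error branch below logs the problem and returns without calling the next
// handler.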
+ if err != nil || len(prefix.Path) > len(req.URL.Path) { + logger := middlewares.GetLogger(req.Context(), s.name, typeName) + logger.Error("Error in stripPrefix middleware", err) + return + } + + req.URL.Path = req.URL.Path[len(prefix.Path):] + if req.URL.RawPath != "" { + req.URL.RawPath = req.URL.RawPath[len(prefix.Path):] + } + req.Header.Add(stripprefix.ForwardedPrefixHeader, prefix.Path) + req.RequestURI = ensureLeadingSlash(req.URL.RequestURI()) + + s.next.ServeHTTP(rw, req) + return + } + http.NotFound(rw, req) +} + +func ensureLeadingSlash(str string) string { + return "/" + strings.TrimPrefix(str, "/") +} diff --git a/middlewares/stripprefixregex/strip_prefix_regex_test.go b/middlewares/stripprefixregex/strip_prefix_regex_test.go new file mode 100644 index 000000000..c882fbeb7 --- /dev/null +++ b/middlewares/stripprefixregex/strip_prefix_regex_test.go @@ -0,0 +1,104 @@ +package stripprefixregex + +import ( + "context" + "net/http" + "net/http/httptest" + "testing" + + "github.com/containous/traefik/config" + "github.com/containous/traefik/middlewares/stripprefix" + "github.com/containous/traefik/testhelpers" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestStripPrefixRegex(t *testing.T) { + testPrefixRegex := config.StripPrefixRegex{ + Regex: []string{"/a/api/", "/b/{regex}/", "/c/{category}/{id:[0-9]+}/"}, + } + + testCases := []struct { + path string + expectedStatusCode int + expectedPath string + expectedRawPath string + expectedHeader string + }{ + { + path: "/a/test", + expectedStatusCode: http.StatusNotFound, + }, + { + path: "/a/api/test", + expectedStatusCode: http.StatusOK, + expectedPath: "test", + expectedHeader: "/a/api/", + }, + { + path: "/b/api/", + expectedStatusCode: http.StatusOK, + expectedHeader: "/b/api/", + }, + { + path: "/b/api/test1", + expectedStatusCode: http.StatusOK, + expectedPath: "test1", + expectedHeader: "/b/api/", + }, + { + path: "/b/api2/test2", + expectedStatusCode: http.StatusOK, + expectedPath: "test2", + expectedHeader: "/b/api2/", + }, + { + path: "/c/api/123/", + expectedStatusCode: http.StatusOK, + expectedHeader: "/c/api/123/", + }, + { + path: "/c/api/123/test3", + expectedStatusCode: http.StatusOK, + expectedPath: "test3", + expectedHeader: "/c/api/123/", + }, + { + path: "/c/api/abc/test4", + expectedStatusCode: http.StatusNotFound, + }, + { + path: "/a/api/a%2Fb", + expectedStatusCode: http.StatusOK, + expectedPath: "a/b", + expectedRawPath: "a%2Fb", + expectedHeader: "/a/api/", + }, + } + + for _, test := range testCases { + test := test + t.Run(test.path, func(t *testing.T) { + t.Parallel() + + var actualPath, actualRawPath, actualHeader string + handlerPath := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + actualPath = r.URL.Path + actualRawPath = r.URL.RawPath + actualHeader = r.Header.Get(stripprefix.ForwardedPrefixHeader) + }) + handler, err := New(context.Background(), handlerPath, testPrefixRegex, "foo-strip-prefix-regex") + require.NoError(t, err) + + req := testhelpers.MustNewRequest(http.MethodGet, "http://localhost"+test.path, nil) + resp := &httptest.ResponseRecorder{Code: http.StatusOK} + + handler.ServeHTTP(resp, req) + + assert.Equal(t, test.expectedStatusCode, resp.Code, "Unexpected status code.") + assert.Equal(t, test.expectedPath, actualPath, "Unexpected path.") + assert.Equal(t, test.expectedRawPath, actualRawPath, "Unexpected raw path.") + assert.Equal(t, test.expectedHeader, actualHeader, "Unexpected '%s' header.", 
stripprefix.ForwardedPrefixHeader)
+		})
+	}
+}
diff --git a/middlewares/tracing/entrypoint.go b/middlewares/tracing/entrypoint.go
index bf2a0879e..a2b832258 100644
--- a/middlewares/tracing/entrypoint.go
+++ b/middlewares/tracing/entrypoint.go
@@ -1,57 +1,57 @@
 package tracing
 
 import (
-	"fmt"
+	"context"
 	"net/http"
 
-	"github.com/containous/traefik/log"
+	"github.com/containous/alice"
+	"github.com/containous/traefik/middlewares"
+	"github.com/containous/traefik/tracing"
 	"github.com/opentracing/opentracing-go"
 	"github.com/opentracing/opentracing-go/ext"
-	"github.com/urfave/negroni"
 )
 
-type entryPointMiddleware struct {
-	entryPoint string
-	*Tracing
-}
+const (
+	entryPointTypeName = "TracingEntryPoint"
+)
 
-// NewEntryPoint creates a new middleware that the incoming request
-func (t *Tracing) NewEntryPoint(name string) negroni.Handler {
-	log.Debug("Added entrypoint tracing middleware")
-	return &entryPointMiddleware{Tracing: t, entryPoint: name}
-}
+// NewEntryPoint creates a new middleware that traces the incoming request.
+func NewEntryPoint(ctx context.Context, t *tracing.Tracing, entryPointName string, next http.Handler) http.Handler {
+	middlewares.GetLogger(ctx, "tracing", entryPointTypeName).Debug("Creating middleware")
 
-func (e *entryPointMiddleware) ServeHTTP(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {
-	opNameFunc := generateEntryPointSpanName
-
-	ctx, _ := e.Extract(opentracing.HTTPHeaders, HTTPHeadersCarrier(r.Header))
-	span := e.StartSpan(opNameFunc(r, e.entryPoint, e.SpanNameLimit), ext.RPCServerOption(ctx))
-	ext.Component.Set(span, e.ServiceName)
-	LogRequest(span, r)
-	ext.SpanKindRPCServer.Set(span)
-
-	r = r.WithContext(opentracing.ContextWithSpan(r.Context(), span))
-
-	recorder := newStatusCodeRecoder(w, 200)
-	next(recorder, r)
-
-	LogResponseCode(span, recorder.Status())
-	span.Finish()
-}
-
-// generateEntryPointSpanName will return a Span name of an appropriate lenth based on the 'spanLimit' argument. If needed, it will be truncated, but will not be less than 24 characters.
-func generateEntryPointSpanName(r *http.Request, entryPoint string, spanLimit int) string { - name := fmt.Sprintf("Entrypoint %s %s", entryPoint, r.Host) - - if spanLimit > 0 && len(name) > spanLimit { - if spanLimit < EntryPointMaxLengthNumber { - log.Warnf("SpanNameLimit is set to be less than required static number of characters, defaulting to %d + 3", EntryPointMaxLengthNumber) - spanLimit = EntryPointMaxLengthNumber + 3 - } - hash := computeHash(name) - limit := (spanLimit - EntryPointMaxLengthNumber) / 2 - name = fmt.Sprintf("Entrypoint %s %s %s", truncateString(entryPoint, limit), truncateString(r.Host, limit), hash) + return &entryPointMiddleware{ + entryPoint: entryPointName, + Tracing: t, + next: next, + } +} + +type entryPointMiddleware struct { + *tracing.Tracing + entryPoint string + next http.Handler +} + +func (e *entryPointMiddleware) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + spanCtx, _ := e.Extract(opentracing.HTTPHeaders, tracing.HTTPHeadersCarrier(req.Header)) + + span, req, finish := e.StartSpanf(req, ext.SpanKindRPCServerEnum, "EntryPoint", []string{e.entryPoint, req.Host}, " ", ext.RPCServerOption(spanCtx)) + defer finish() + + ext.Component.Set(span, e.ServiceName) + tracing.LogRequest(span, req) + + req = req.WithContext(tracing.WithTracing(req.Context(), e.Tracing)) + + recorder := newStatusCodeRecoder(rw, http.StatusOK) + e.next.ServeHTTP(recorder, req) + + tracing.LogResponseCode(span, recorder.Status()) +} + +// WrapEntryPointHandler Wraps tracing to alice.Constructor. +func WrapEntryPointHandler(ctx context.Context, tracer *tracing.Tracing, entryPointName string) alice.Constructor { + return func(next http.Handler) (http.Handler, error) { + return NewEntryPoint(ctx, tracer, entryPointName, next), nil } - - return name } diff --git a/middlewares/tracing/entrypoint_test.go b/middlewares/tracing/entrypoint_test.go index 865bcfc09..24338c43c 100644 --- a/middlewares/tracing/entrypoint_test.go +++ b/middlewares/tracing/entrypoint_test.go @@ -1,70 +1,87 @@ package tracing import ( + "context" "net/http" "net/http/httptest" "testing" + "github.com/containous/traefik/tracing" "github.com/opentracing/opentracing-go/ext" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) -func TestEntryPointMiddlewareServeHTTP(t *testing.T) { - expectedTags := map[string]interface{}{ - "span.kind": ext.SpanKindRPCServerEnum, - "http.method": "GET", - "component": "", - "http.url": "http://www.test.com", - "http.host": "www.test.com", +func TestEntryPointMiddleware(t *testing.T) { + type expected struct { + Tags map[string]interface{} + OperationName string } testCases := []struct { - desc string - entryPoint string - tracing *Tracing - expectedTags map[string]interface{} - expectedName string + desc string + entryPoint string + spanNameLimit int + tracing *trackingBackenMock + expected expected }{ { - desc: "no truncation test", - entryPoint: "test", - tracing: &Tracing{ - SpanNameLimit: 0, - tracer: &MockTracer{Span: &MockSpan{Tags: make(map[string]interface{})}}, + desc: "no truncation test", + entryPoint: "test", + spanNameLimit: 0, + tracing: &trackingBackenMock{ + tracer: &MockTracer{Span: &MockSpan{Tags: make(map[string]interface{})}}, }, - expectedTags: expectedTags, - expectedName: "Entrypoint test www.test.com", - }, { - desc: "basic test", - entryPoint: "test", - tracing: &Tracing{ - SpanNameLimit: 25, - tracer: &MockTracer{Span: &MockSpan{Tags: make(map[string]interface{})}}, + expected: expected{ + Tags: map[string]interface{}{ + 
"span.kind": ext.SpanKindRPCServerEnum, + "http.method": http.MethodGet, + "component": "", + "http.url": "http://www.test.com", + "http.host": "www.test.com", + }, + OperationName: "EntryPoint test www.test.com", + }, + }, + { + desc: "basic test", + entryPoint: "test", + spanNameLimit: 25, + tracing: &trackingBackenMock{ + tracer: &MockTracer{Span: &MockSpan{Tags: make(map[string]interface{})}}, + }, + expected: expected{ + Tags: map[string]interface{}{ + "span.kind": ext.SpanKindRPCServerEnum, + "http.method": http.MethodGet, + "component": "", + "http.url": "http://www.test.com", + "http.host": "www.test.com", + }, + OperationName: "EntryPoint te... ww... 0c15301b", }, - expectedTags: expectedTags, - expectedName: "Entrypoint te... ww... 39b97e58", }, } for _, test := range testCases { - test := test t.Run(test.desc, func(t *testing.T) { - t.Parallel() - e := &entryPointMiddleware{ - entryPoint: test.entryPoint, - Tracing: test.tracing, - } + newTracing, err := tracing.NewTracing("", test.spanNameLimit, test.tracing) + require.NoError(t, err) - next := func(http.ResponseWriter, *http.Request) { + req := httptest.NewRequest(http.MethodGet, "http://www.test.com", nil) + rw := httptest.NewRecorder() + + next := http.HandlerFunc(func(http.ResponseWriter, *http.Request) { span := test.tracing.tracer.(*MockTracer).Span - actual := span.Tags - assert.Equal(t, test.expectedTags, actual) - assert.Equal(t, test.expectedName, span.OpName) - } + tags := span.Tags + assert.Equal(t, test.expected.Tags, tags) + assert.Equal(t, test.expected.OperationName, span.OpName) + }) - e.ServeHTTP(httptest.NewRecorder(), httptest.NewRequest(http.MethodGet, "http://www.test.com", nil), next) + handler := NewEntryPoint(context.Background(), newTracing, test.entryPoint, next) + handler.ServeHTTP(rw, req) }) } } diff --git a/middlewares/tracing/forwarder.go b/middlewares/tracing/forwarder.go index fd4f243bf..faa092d93 100644 --- a/middlewares/tracing/forwarder.go +++ b/middlewares/tracing/forwarder.go @@ -1,63 +1,58 @@ package tracing import ( - "fmt" + "context" "net/http" - "github.com/containous/traefik/log" + "github.com/containous/traefik/middlewares" + "github.com/containous/traefik/tracing" "github.com/opentracing/opentracing-go/ext" - "github.com/urfave/negroni" +) + +const ( + forwarderTypeName = "TracingForwarder" ) type forwarderMiddleware struct { - frontend string - backend string - opName string - *Tracing + router string + service string + next http.Handler } -// NewForwarderMiddleware creates a new forwarder middleware that traces the outgoing request -func (t *Tracing) NewForwarderMiddleware(frontend, backend string) negroni.Handler { - log.Debugf("Added outgoing tracing middleware %s", frontend) +// NewForwarder creates a new forwarder middleware that traces the outgoing request. +func NewForwarder(ctx context.Context, router, service string, next http.Handler) http.Handler { + middlewares.GetLogger(ctx, "tracing", forwarderTypeName). 
+ Debugf("Added outgoing tracing middleware %s", service) + return &forwarderMiddleware{ - Tracing: t, - frontend: frontend, - backend: backend, - opName: generateForwardSpanName(frontend, backend, t.SpanNameLimit), + router: router, + service: service, + next: next, } } -func (f *forwarderMiddleware) ServeHTTP(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) { - span, r, finish := StartSpan(r, f.opName, true) +func (f *forwarderMiddleware) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + tr, err := tracing.FromContext(req.Context()) + if err != nil { + f.next.ServeHTTP(rw, req) + return + } + + opParts := []string{f.service, f.router} + span, req, finish := tr.StartSpanf(req, ext.SpanKindRPCClientEnum, "forward", opParts, "/") defer finish() - span.SetTag("frontend.name", f.frontend) - span.SetTag("backend.name", f.backend) - ext.HTTPMethod.Set(span, r.Method) - ext.HTTPUrl.Set(span, fmt.Sprintf("%s%s", r.URL.String(), r.RequestURI)) - span.SetTag("http.host", r.Host) - InjectRequestHeaders(r) + span.SetTag("service.name", f.service) + span.SetTag("router.name", f.router) + ext.HTTPMethod.Set(span, req.Method) + ext.HTTPUrl.Set(span, req.URL.String()) + span.SetTag("http.host", req.Host) - recorder := newStatusCodeRecoder(w, 200) + tracing.InjectRequestHeaders(req) - next(recorder, r) + recorder := newStatusCodeRecoder(rw, 200) - LogResponseCode(span, recorder.Status()) -} - -// generateForwardSpanName will return a Span name of an appropriate lenth based on the 'spanLimit' argument. If needed, it will be truncated, but will not be less than 21 characters -func generateForwardSpanName(frontend, backend string, spanLimit int) string { - name := fmt.Sprintf("forward %s/%s", frontend, backend) - - if spanLimit > 0 && len(name) > spanLimit { - if spanLimit < ForwardMaxLengthNumber { - log.Warnf("SpanNameLimit is set to be less than required static number of characters, defaulting to %d + 3", ForwardMaxLengthNumber) - spanLimit = ForwardMaxLengthNumber + 3 - } - hash := computeHash(name) - limit := (spanLimit - ForwardMaxLengthNumber) / 2 - name = fmt.Sprintf("forward %s/%s/%s", truncateString(frontend, limit), truncateString(backend, limit), hash) - } - - return name + f.next.ServeHTTP(recorder, req) + + tracing.LogResponseCode(span, recorder.Status()) } diff --git a/middlewares/tracing/forwarder_test.go b/middlewares/tracing/forwarder_test.go index 00c90c293..c53fbe7cf 100644 --- a/middlewares/tracing/forwarder_test.go +++ b/middlewares/tracing/forwarder_test.go @@ -1,93 +1,136 @@ package tracing import ( + "context" + "net/http" + "net/http/httptest" "testing" + "github.com/containous/traefik/tracing" + "github.com/opentracing/opentracing-go/ext" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) -func TestTracingNewForwarderMiddleware(t *testing.T) { +func TestNewForwarder(t *testing.T) { + type expected struct { + Tags map[string]interface{} + OperationName string + } + testCases := []struct { - desc string - tracer *Tracing - frontend string - backend string - expected *forwarderMiddleware + desc string + spanNameLimit int + tracing *trackingBackenMock + service string + router string + expected expected }{ { - desc: "Simple Forward Tracer without truncation and hashing", - tracer: &Tracing{ - SpanNameLimit: 101, + desc: "Simple Forward Tracer without truncation and hashing", + spanNameLimit: 101, + tracing: &trackingBackenMock{ + tracer: &MockTracer{Span: &MockSpan{Tags: make(map[string]interface{})}}, }, - frontend: 
"some-service.domain.tld", - backend: "some-service.domain.tld", - expected: &forwarderMiddleware{ - Tracing: &Tracing{ - SpanNameLimit: 101, + service: "some-service.domain.tld", + router: "some-service.domain.tld", + expected: expected{ + Tags: map[string]interface{}{ + "http.host": "www.test.com", + "http.method": "GET", + "http.url": "http://www.test.com/toto", + "service.name": "some-service.domain.tld", + "router.name": "some-service.domain.tld", + "span.kind": ext.SpanKindRPCClientEnum, }, - frontend: "some-service.domain.tld", - backend: "some-service.domain.tld", - opName: "forward some-service.domain.tld/some-service.domain.tld", + OperationName: "forward some-service.domain.tld/some-service.domain.tld", }, }, { - desc: "Simple Forward Tracer with truncation and hashing", - tracer: &Tracing{ - SpanNameLimit: 101, + desc: "Simple Forward Tracer with truncation and hashing", + spanNameLimit: 101, + tracing: &trackingBackenMock{ + tracer: &MockTracer{Span: &MockSpan{Tags: make(map[string]interface{})}}, }, - frontend: "some-service-100.slug.namespace.environment.domain.tld", - backend: "some-service-100.slug.namespace.environment.domain.tld", - expected: &forwarderMiddleware{ - Tracing: &Tracing{ - SpanNameLimit: 101, + service: "some-service-100.slug.namespace.environment.domain.tld", + router: "some-service-100.slug.namespace.environment.domain.tld", + expected: expected{ + Tags: map[string]interface{}{ + "http.host": "www.test.com", + "http.method": "GET", + "http.url": "http://www.test.com/toto", + "service.name": "some-service-100.slug.namespace.environment.domain.tld", + "router.name": "some-service-100.slug.namespace.environment.domain.tld", + "span.kind": ext.SpanKindRPCClientEnum, }, - frontend: "some-service-100.slug.namespace.environment.domain.tld", - backend: "some-service-100.slug.namespace.environment.domain.tld", - opName: "forward some-service-100.slug.namespace.enviro.../some-service-100.slug.namespace.enviro.../bc4a0d48", + OperationName: "forward some-service-100.slug.namespace.enviro.../some-service-100.slug.namespace.enviro.../bc4a0d48", }, }, { - desc: "Exactly 101 chars", - tracer: &Tracing{ - SpanNameLimit: 101, + desc: "Exactly 101 chars", + spanNameLimit: 101, + tracing: &trackingBackenMock{ + tracer: &MockTracer{Span: &MockSpan{Tags: make(map[string]interface{})}}, }, - frontend: "some-service1.namespace.environment.domain.tld", - backend: "some-service1.namespace.environment.domain.tld", - expected: &forwarderMiddleware{ - Tracing: &Tracing{ - SpanNameLimit: 101, + service: "some-service1.namespace.environment.domain.tld", + router: "some-service1.namespace.environment.domain.tld", + expected: expected{ + Tags: map[string]interface{}{ + "http.host": "www.test.com", + "http.method": "GET", + "http.url": "http://www.test.com/toto", + "service.name": "some-service1.namespace.environment.domain.tld", + "router.name": "some-service1.namespace.environment.domain.tld", + "span.kind": ext.SpanKindRPCClientEnum, }, - frontend: "some-service1.namespace.environment.domain.tld", - backend: "some-service1.namespace.environment.domain.tld", - opName: "forward some-service1.namespace.environment.domain.tld/some-service1.namespace.environment.domain.tld", + OperationName: "forward some-service1.namespace.environment.domain.tld/some-service1.namespace.environment.domain.tld", }, }, { - desc: "More than 101 chars", - tracer: &Tracing{ - SpanNameLimit: 101, + desc: "More than 101 chars", + spanNameLimit: 101, + tracing: &trackingBackenMock{ + tracer: &MockTracer{Span: 
&MockSpan{Tags: make(map[string]interface{})}}, }, - frontend: "some-service1.frontend.namespace.environment.domain.tld", - backend: "some-service1.backend.namespace.environment.domain.tld", - expected: &forwarderMiddleware{ - Tracing: &Tracing{ - SpanNameLimit: 101, + service: "some-service1.frontend.namespace.environment.domain.tld", + router: "some-service1.backend.namespace.environment.domain.tld", + expected: expected{ + Tags: map[string]interface{}{ + "http.host": "www.test.com", + "http.method": "GET", + "http.url": "http://www.test.com/toto", + "service.name": "some-service1.frontend.namespace.environment.domain.tld", + "router.name": "some-service1.backend.namespace.environment.domain.tld", + "span.kind": ext.SpanKindRPCClientEnum, }, - frontend: "some-service1.frontend.namespace.environment.domain.tld", - backend: "some-service1.backend.namespace.environment.domain.tld", - opName: "forward some-service1.frontend.namespace.envir.../some-service1.backend.namespace.enviro.../fa49dd23", + OperationName: "forward some-service1.frontend.namespace.envir.../some-service1.backend.namespace.enviro.../fa49dd23", }, }, } for _, test := range testCases { - test := test t.Run(test.desc, func(t *testing.T) { - t.Parallel() - actual := test.tracer.NewForwarderMiddleware(test.frontend, test.backend) + newTracing, err := tracing.NewTracing("", test.spanNameLimit, test.tracing) + require.NoError(t, err) - assert.Equal(t, test.expected, actual) - assert.True(t, len(test.expected.opName) <= test.tracer.SpanNameLimit) + req := httptest.NewRequest(http.MethodGet, "http://www.test.com/toto", nil) + req = req.WithContext(tracing.WithTracing(req.Context(), newTracing)) + + rw := httptest.NewRecorder() + + next := http.HandlerFunc(func(http.ResponseWriter, *http.Request) { + span := test.tracing.tracer.(*MockTracer).Span + + tags := span.Tags + assert.Equal(t, test.expected.Tags, tags) + assert.True(t, len(test.expected.OperationName) <= test.spanNameLimit, + "the len of the operation name %q [len: %d] doesn't respect limit %d", + test.expected.OperationName, len(test.expected.OperationName), test.spanNameLimit) + assert.Equal(t, test.expected.OperationName, span.OpName) + }) + + handler := NewForwarder(context.Background(), test.router, test.service, next) + handler.ServeHTTP(rw, req) }) } } diff --git a/middlewares/tracing/mock_tracing_test.go b/middlewares/tracing/mock_tracing_test.go new file mode 100644 index 000000000..6ac60b738 --- /dev/null +++ b/middlewares/tracing/mock_tracing_test.go @@ -0,0 +1,70 @@ +package tracing + +import ( + "io" + + "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go/log" +) + +type MockTracer struct { + Span *MockSpan +} + +// StartSpan belongs to the Tracer interface. +func (n MockTracer) StartSpan(operationName string, opts ...opentracing.StartSpanOption) opentracing.Span { + n.Span.OpName = operationName + return n.Span +} + +// Inject belongs to the Tracer interface. +func (n MockTracer) Inject(sp opentracing.SpanContext, format interface{}, carrier interface{}) error { + return nil +} + +// Extract belongs to the Tracer interface. 
+func (n MockTracer) Extract(format interface{}, carrier interface{}) (opentracing.SpanContext, error) {
+	return nil, opentracing.ErrSpanContextNotFound
+}
+
+// MockSpanContext is a mock implementation of opentracing.SpanContext.
+type MockSpanContext struct{}
+
+func (n MockSpanContext) ForeachBaggageItem(handler func(k, v string) bool) {}
+
+// MockSpan is a mock implementation of opentracing.Span.
+type MockSpan struct {
+	OpName string
+	Tags   map[string]interface{}
+}
+
+func (n MockSpan) Context() opentracing.SpanContext { return MockSpanContext{} }
+func (n MockSpan) SetBaggageItem(key, val string) opentracing.Span {
+	return MockSpan{Tags: make(map[string]interface{})}
+}
+func (n MockSpan) BaggageItem(key string) string { return "" }
+func (n MockSpan) SetTag(key string, value interface{}) opentracing.Span {
+	n.Tags[key] = value
+	return n
+}
+func (n MockSpan) LogFields(fields ...log.Field)                          {}
+func (n MockSpan) LogKV(keyVals ...interface{})                           {}
+func (n MockSpan) Finish()                                                {}
+func (n MockSpan) FinishWithOptions(opts opentracing.FinishOptions)       {}
+func (n MockSpan) SetOperationName(operationName string) opentracing.Span { return n }
+func (n MockSpan) Tracer() opentracing.Tracer                             { return MockTracer{} }
+func (n MockSpan) LogEvent(event string)                                  {}
+func (n MockSpan) LogEventWithPayload(event string, payload interface{})  {}
+func (n MockSpan) Log(data opentracing.LogData)                           {}
+func (n MockSpan) Reset() {
+	n.Tags = make(map[string]interface{})
+}
+
+type trackingBackenMock struct {
+	tracer opentracing.Tracer
+}
+
+func (t *trackingBackenMock) Setup(componentName string) (opentracing.Tracer, io.Closer, error) {
+	opentracing.SetGlobalTracer(t.tracer)
+	return t.tracer, nil, nil
+}
diff --git a/middlewares/tracing/wrapper.go b/middlewares/tracing/wrapper.go
index 8e9c566c1..e0d1810d6 100644
--- a/middlewares/tracing/wrapper.go
+++ b/middlewares/tracing/wrapper.go
@@ -1,66 +1,68 @@
 package tracing
 
 import (
+	"context"
 	"net/http"
 
-	"github.com/urfave/negroni"
+	"github.com/containous/alice"
+	"github.com/containous/traefik/log"
+	"github.com/containous/traefik/tracing"
+	"github.com/opentracing/opentracing-go/ext"
 )
 
-// NewNegroniHandlerWrapper return a negroni.Handler struct
-func (t *Tracing) NewNegroniHandlerWrapper(name string, handler negroni.Handler, clientSpanKind bool) negroni.Handler {
-	if t.IsEnabled() && handler != nil {
-		return &NegroniHandlerWrapper{
-			name:           name,
-			next:           handler,
-			clientSpanKind: clientSpanKind,
+// Tracable embeds tracing information.
+type Tracable interface {
+	GetTracingInformation() (name string, spanKind ext.SpanKindEnum)
+}
+
+// Wrap adds traceability to an alice.Constructor.
+func Wrap(ctx context.Context, constructor alice.Constructor) alice.Constructor { + return func(next http.Handler) (http.Handler, error) { + if constructor == nil { + return nil, nil } - } - return handler -} - -// NewHTTPHandlerWrapper return a http.Handler struct -func (t *Tracing) NewHTTPHandlerWrapper(name string, handler http.Handler, clientSpanKind bool) http.Handler { - if t.IsEnabled() && handler != nil { - return &HTTPHandlerWrapper{ - name: name, - handler: handler, - clientSpanKind: clientSpanKind, + handler, err := constructor(next) + if err != nil { + return nil, err } + + if tracableHandler, ok := handler.(Tracable); ok { + name, spanKind := tracableHandler.GetTracingInformation() + log.FromContext(ctx).WithField(log.MiddlewareName, name).Debug("Adding tracing to middleware") + return NewWrapper(handler, name, spanKind), nil + } + return handler, nil } - return handler } -// NegroniHandlerWrapper is used to wrap negroni handler middleware -type NegroniHandlerWrapper struct { - name string - next negroni.Handler - clientSpanKind bool +// NewWrapper returns a http.Handler struct +func NewWrapper(next http.Handler, name string, spanKind ext.SpanKindEnum) http.Handler { + return &Wrapper{ + next: next, + name: name, + spanKind: spanKind, + } } -func (t *NegroniHandlerWrapper) ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { +// Wrapper is used to wrap http handler middleware. +type Wrapper struct { + next http.Handler + name string + spanKind ext.SpanKindEnum +} + +func (w *Wrapper) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + _, err := tracing.FromContext(req.Context()) + if err != nil { + w.next.ServeHTTP(rw, req) + return + } + var finish func() - _, r, finish = StartSpan(r, t.name, t.clientSpanKind) + _, req, finish = tracing.StartSpan(req, w.name, w.spanKind) defer finish() - if t.next != nil { - t.next.ServeHTTP(rw, r, next) + if w.next != nil { + w.next.ServeHTTP(rw, req) } } - -// HTTPHandlerWrapper is used to wrap http handler middleware -type HTTPHandlerWrapper struct { - name string - handler http.Handler - clientSpanKind bool -} - -func (t *HTTPHandlerWrapper) ServeHTTP(rw http.ResponseWriter, r *http.Request) { - var finish func() - _, r, finish = StartSpan(r, t.name, t.clientSpanKind) - defer finish() - - if t.handler != nil { - t.handler.ServeHTTP(rw, r) - } - -} diff --git a/old/api/dashboard.go b/old/api/dashboard.go new file mode 100644 index 000000000..bb7c60d15 --- /dev/null +++ b/old/api/dashboard.go @@ -0,0 +1,39 @@ +package api + +import ( + "net/http" + + "github.com/containous/mux" + "github.com/containous/traefik/old/log" + "github.com/elazarl/go-bindata-assetfs" +) + +// DashboardHandler expose dashboard routes +type DashboardHandler struct { + Assets *assetfs.AssetFS +} + +// AddRoutes add dashboard routes on a router +func (g DashboardHandler) AddRoutes(router *mux.Router) { + if g.Assets == nil { + log.Error("No assets for dashboard") + return + } + + // Expose dashboard + router.Methods(http.MethodGet). + Path("/"). + HandlerFunc(func(response http.ResponseWriter, request *http.Request) { + http.Redirect(response, request, request.Header.Get("X-Forwarded-Prefix")+"/dashboard/", 302) + }) + + router.Methods(http.MethodGet). + Path("/dashboard/status"). + HandlerFunc(func(response http.ResponseWriter, request *http.Request) { + http.Redirect(response, request, "/dashboard/", 302) + }) + + router.Methods(http.MethodGet). + PathPrefix("/dashboard/"). 
+ Handler(http.StripPrefix("/dashboard/", http.FileServer(g.Assets))) +} diff --git a/old/api/debug.go b/old/api/debug.go new file mode 100644 index 000000000..785a61988 --- /dev/null +++ b/old/api/debug.go @@ -0,0 +1,48 @@ +package api + +import ( + "expvar" + "fmt" + "net/http" + "net/http/pprof" + "runtime" + + "github.com/containous/mux" +) + +func init() { + expvar.Publish("Goroutines", expvar.Func(goroutines)) +} + +func goroutines() interface{} { + return runtime.NumGoroutine() +} + +// DebugHandler expose debug routes +type DebugHandler struct{} + +// AddRoutes add debug routes on a router +func (g DebugHandler) AddRoutes(router *mux.Router) { + router.Methods(http.MethodGet).Path("/debug/vars"). + HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + fmt.Fprint(w, "{\n") + first := true + expvar.Do(func(kv expvar.KeyValue) { + if !first { + fmt.Fprint(w, ",\n") + } + first = false + fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value) + }) + fmt.Fprint(w, "\n}\n") + }) + + runtime.SetBlockProfileRate(1) + runtime.SetMutexProfileFraction(5) + router.Methods(http.MethodGet).PathPrefix("/debug/pprof/cmdline").HandlerFunc(pprof.Cmdline) + router.Methods(http.MethodGet).PathPrefix("/debug/pprof/profile").HandlerFunc(pprof.Profile) + router.Methods(http.MethodGet).PathPrefix("/debug/pprof/symbol").HandlerFunc(pprof.Symbol) + router.Methods(http.MethodGet).PathPrefix("/debug/pprof/trace").HandlerFunc(pprof.Trace) + router.Methods(http.MethodGet).PathPrefix("/debug/pprof/").HandlerFunc(pprof.Index) +} diff --git a/old/api/handler.go b/old/api/handler.go new file mode 100644 index 000000000..3e03f3491 --- /dev/null +++ b/old/api/handler.go @@ -0,0 +1,252 @@ +package api + +import ( + "net/http" + + "github.com/containous/mux" + "github.com/containous/traefik/old/log" + "github.com/containous/traefik/old/middlewares" + "github.com/containous/traefik/old/types" + "github.com/containous/traefik/safe" + "github.com/containous/traefik/version" + "github.com/elazarl/go-bindata-assetfs" + thoas_stats "github.com/thoas/stats" + "github.com/unrolled/render" +) + +// Handler expose api routes +type Handler struct { + EntryPoint string `description:"EntryPoint" export:"true"` + Dashboard bool `description:"Activate dashboard" export:"true"` + Debug bool `export:"true"` + CurrentConfigurations *safe.Safe + Statistics *types.Statistics `description:"Enable more detailed statistics" export:"true"` + Stats *thoas_stats.Stats `json:"-"` + StatsRecorder *middlewares.StatsRecorder `json:"-"` + DashboardAssets *assetfs.AssetFS `json:"-"` +} + +var ( + templatesRenderer = render.New(render.Options{ + Directory: "nowhere", + }) +) + +// AddRoutes add api routes on a router +func (p Handler) AddRoutes(router *mux.Router) { + if p.Debug { + DebugHandler{}.AddRoutes(router) + } + + router.Methods(http.MethodGet).Path("/api").HandlerFunc(p.getConfigHandler) + router.Methods(http.MethodGet).Path("/api/providers").HandlerFunc(p.getConfigHandler) + router.Methods(http.MethodGet).Path("/api/providers/{provider}").HandlerFunc(p.getProviderHandler) + router.Methods(http.MethodGet).Path("/api/providers/{provider}/backends").HandlerFunc(p.getBackendsHandler) + router.Methods(http.MethodGet).Path("/api/providers/{provider}/backends/{backend}").HandlerFunc(p.getBackendHandler) + router.Methods(http.MethodGet).Path("/api/providers/{provider}/backends/{backend}/servers").HandlerFunc(p.getServersHandler) + 
router.Methods(http.MethodGet).Path("/api/providers/{provider}/backends/{backend}/servers/{server}").HandlerFunc(p.getServerHandler) + router.Methods(http.MethodGet).Path("/api/providers/{provider}/frontends").HandlerFunc(p.getFrontendsHandler) + router.Methods(http.MethodGet).Path("/api/providers/{provider}/frontends/{frontend}").HandlerFunc(p.getFrontendHandler) + router.Methods(http.MethodGet).Path("/api/providers/{provider}/frontends/{frontend}/routes").HandlerFunc(p.getRoutesHandler) + router.Methods(http.MethodGet).Path("/api/providers/{provider}/frontends/{frontend}/routes/{route}").HandlerFunc(p.getRouteHandler) + + // health route + router.Methods(http.MethodGet).Path("/health").HandlerFunc(p.getHealthHandler) + + version.Handler{}.Append(router) + + if p.Dashboard { + DashboardHandler{Assets: p.DashboardAssets}.AddRoutes(router) + } +} + +func getProviderIDFromVars(vars map[string]string) string { + providerID := vars["provider"] + // TODO: Deprecated + if providerID == "rest" { + providerID = "web" + } + return providerID +} + +func (p Handler) getConfigHandler(response http.ResponseWriter, request *http.Request) { + currentConfigurations := p.CurrentConfigurations.Get().(types.Configurations) + err := templatesRenderer.JSON(response, http.StatusOK, currentConfigurations) + if err != nil { + log.Error(err) + } +} + +func (p Handler) getProviderHandler(response http.ResponseWriter, request *http.Request) { + providerID := getProviderIDFromVars(mux.Vars(request)) + + currentConfigurations := p.CurrentConfigurations.Get().(types.Configurations) + if provider, ok := currentConfigurations[providerID]; ok { + err := templatesRenderer.JSON(response, http.StatusOK, provider) + if err != nil { + log.Error(err) + } + } else { + http.NotFound(response, request) + } +} + +func (p Handler) getBackendsHandler(response http.ResponseWriter, request *http.Request) { + providerID := getProviderIDFromVars(mux.Vars(request)) + + currentConfigurations := p.CurrentConfigurations.Get().(types.Configurations) + if provider, ok := currentConfigurations[providerID]; ok { + err := templatesRenderer.JSON(response, http.StatusOK, provider.Backends) + if err != nil { + log.Error(err) + } + } else { + http.NotFound(response, request) + } +} + +func (p Handler) getBackendHandler(response http.ResponseWriter, request *http.Request) { + vars := mux.Vars(request) + providerID := getProviderIDFromVars(vars) + backendID := vars["backend"] + + currentConfigurations := p.CurrentConfigurations.Get().(types.Configurations) + if provider, ok := currentConfigurations[providerID]; ok { + if backend, ok := provider.Backends[backendID]; ok { + err := templatesRenderer.JSON(response, http.StatusOK, backend) + if err != nil { + log.Error(err) + } + return + } + } + http.NotFound(response, request) +} + +func (p Handler) getServersHandler(response http.ResponseWriter, request *http.Request) { + vars := mux.Vars(request) + providerID := getProviderIDFromVars(vars) + backendID := vars["backend"] + + currentConfigurations := p.CurrentConfigurations.Get().(types.Configurations) + if provider, ok := currentConfigurations[providerID]; ok { + if backend, ok := provider.Backends[backendID]; ok { + err := templatesRenderer.JSON(response, http.StatusOK, backend.Servers) + if err != nil { + log.Error(err) + } + return + } + } + http.NotFound(response, request) +} + +func (p Handler) getServerHandler(response http.ResponseWriter, request *http.Request) { + vars := mux.Vars(request) + providerID := getProviderIDFromVars(vars) + backendID 
:= vars["backend"] + serverID := vars["server"] + + currentConfigurations := p.CurrentConfigurations.Get().(types.Configurations) + if provider, ok := currentConfigurations[providerID]; ok { + if backend, ok := provider.Backends[backendID]; ok { + if server, ok := backend.Servers[serverID]; ok { + err := templatesRenderer.JSON(response, http.StatusOK, server) + if err != nil { + log.Error(err) + } + return + } + } + } + http.NotFound(response, request) +} + +func (p Handler) getFrontendsHandler(response http.ResponseWriter, request *http.Request) { + providerID := getProviderIDFromVars(mux.Vars(request)) + + currentConfigurations := p.CurrentConfigurations.Get().(types.Configurations) + if provider, ok := currentConfigurations[providerID]; ok { + err := templatesRenderer.JSON(response, http.StatusOK, provider.Frontends) + if err != nil { + log.Error(err) + } + } else { + http.NotFound(response, request) + } +} + +func (p Handler) getFrontendHandler(response http.ResponseWriter, request *http.Request) { + vars := mux.Vars(request) + providerID := getProviderIDFromVars(vars) + frontendID := vars["frontend"] + + currentConfigurations := p.CurrentConfigurations.Get().(types.Configurations) + if provider, ok := currentConfigurations[providerID]; ok { + if frontend, ok := provider.Frontends[frontendID]; ok { + err := templatesRenderer.JSON(response, http.StatusOK, frontend) + if err != nil { + log.Error(err) + } + return + } + } + http.NotFound(response, request) +} + +func (p Handler) getRoutesHandler(response http.ResponseWriter, request *http.Request) { + vars := mux.Vars(request) + providerID := getProviderIDFromVars(vars) + frontendID := vars["frontend"] + + currentConfigurations := p.CurrentConfigurations.Get().(types.Configurations) + if provider, ok := currentConfigurations[providerID]; ok { + if frontend, ok := provider.Frontends[frontendID]; ok { + err := templatesRenderer.JSON(response, http.StatusOK, frontend.Routes) + if err != nil { + log.Error(err) + } + return + } + } + http.NotFound(response, request) +} + +func (p Handler) getRouteHandler(response http.ResponseWriter, request *http.Request) { + vars := mux.Vars(request) + providerID := getProviderIDFromVars(vars) + frontendID := vars["frontend"] + routeID := vars["route"] + + currentConfigurations := p.CurrentConfigurations.Get().(types.Configurations) + if provider, ok := currentConfigurations[providerID]; ok { + if frontend, ok := provider.Frontends[frontendID]; ok { + if route, ok := frontend.Routes[routeID]; ok { + err := templatesRenderer.JSON(response, http.StatusOK, route) + if err != nil { + log.Error(err) + } + return + } + } + } + http.NotFound(response, request) +} + +// healthResponse combines data returned by thoas/stats with statistics (if +// they are enabled). 
+type healthResponse struct { + *thoas_stats.Data + *middlewares.Stats +} + +func (p *Handler) getHealthHandler(response http.ResponseWriter, request *http.Request) { + health := &healthResponse{Data: p.Stats.Data()} + if p.StatsRecorder != nil { + health.Stats = p.StatsRecorder.Data() + } + err := templatesRenderer.JSON(response, http.StatusOK, health) + if err != nil { + log.Error(err) + } +} diff --git a/configuration/configuration.go b/old/configuration/configuration.go similarity index 86% rename from configuration/configuration.go rename to old/configuration/configuration.go index c303a34e1..35a7597bd 100644 --- a/configuration/configuration.go +++ b/old/configuration/configuration.go @@ -6,33 +6,33 @@ import ( "time" "github.com/containous/flaeg/parse" - "github.com/containous/traefik-extra-service-fabric" "github.com/containous/traefik/acme" - "github.com/containous/traefik/api" - "github.com/containous/traefik/log" - "github.com/containous/traefik/middlewares/tracing" - "github.com/containous/traefik/middlewares/tracing/datadog" - "github.com/containous/traefik/middlewares/tracing/jaeger" - "github.com/containous/traefik/middlewares/tracing/zipkin" - "github.com/containous/traefik/ping" + "github.com/containous/traefik/old/api" + "github.com/containous/traefik/old/log" + "github.com/containous/traefik/old/middlewares/tracing" + "github.com/containous/traefik/old/middlewares/tracing/datadog" + "github.com/containous/traefik/old/middlewares/tracing/jaeger" + "github.com/containous/traefik/old/middlewares/tracing/zipkin" + "github.com/containous/traefik/old/ping" + "github.com/containous/traefik/old/provider/boltdb" + "github.com/containous/traefik/old/provider/consul" + "github.com/containous/traefik/old/provider/consulcatalog" + "github.com/containous/traefik/old/provider/docker" + "github.com/containous/traefik/old/provider/dynamodb" + "github.com/containous/traefik/old/provider/ecs" + "github.com/containous/traefik/old/provider/etcd" + "github.com/containous/traefik/old/provider/eureka" + "github.com/containous/traefik/old/provider/file" + "github.com/containous/traefik/old/provider/kubernetes" + "github.com/containous/traefik/old/provider/marathon" + "github.com/containous/traefik/old/provider/mesos" + "github.com/containous/traefik/old/provider/rancher" + "github.com/containous/traefik/old/provider/rest" + "github.com/containous/traefik/old/provider/zk" + "github.com/containous/traefik/old/types" acmeprovider "github.com/containous/traefik/provider/acme" - "github.com/containous/traefik/provider/boltdb" - "github.com/containous/traefik/provider/consul" - "github.com/containous/traefik/provider/consulcatalog" - "github.com/containous/traefik/provider/docker" - "github.com/containous/traefik/provider/dynamodb" - "github.com/containous/traefik/provider/ecs" - "github.com/containous/traefik/provider/etcd" - "github.com/containous/traefik/provider/eureka" - "github.com/containous/traefik/provider/file" - "github.com/containous/traefik/provider/kubernetes" - "github.com/containous/traefik/provider/marathon" - "github.com/containous/traefik/provider/mesos" - "github.com/containous/traefik/provider/rancher" - "github.com/containous/traefik/provider/rest" - "github.com/containous/traefik/provider/zk" "github.com/containous/traefik/tls" - "github.com/containous/traefik/types" + newtypes "github.com/containous/traefik/types" "github.com/pkg/errors" lego "github.com/xenolf/lego/acme" ) @@ -100,7 +100,6 @@ type GlobalConfiguration struct { ECS *ecs.Provider `description:"Enable ECS backend 
with default settings" export:"true"` Rancher *rancher.Provider `description:"Enable Rancher backend with default settings" export:"true"` DynamoDB *dynamodb.Provider `description:"Enable DynamoDB backend with default settings" export:"true"` - ServiceFabric *servicefabric.Provider `description:"Enable Service Fabric backend with default settings" export:"true"` Rest *rest.Provider `description:"Enable Rest backend with default settings" export:"true"` API *api.Handler `description:"Enable api/dashboard" export:"true"` Metrics *types.Metrics `description:"Enable a metrics exporter" export:"true"` @@ -262,22 +261,6 @@ func (gc *GlobalConfiguration) initACMEProvider() { gc.ACME.HTTPChallenge = nil } - for _, domain := range gc.ACME.Domains { - if domain.Main != lego.UnFqdn(domain.Main) { - log.Warnf("FQDN detected, please remove the trailing dot: %s", domain.Main) - } - for _, san := range domain.SANs { - if san != lego.UnFqdn(san) { - log.Warnf("FQDN detected, please remove the trailing dot: %s", san) - } - } - } - - if len(gc.ACME.DNSProvider) > 0 { - log.Warn("ACME.DNSProvider is deprecated, use ACME.DNSChallenge instead") - gc.ACME.DNSChallenge = &acmeprovider.DNSChallenge{Provider: gc.ACME.DNSProvider, DelayBeforeCheck: gc.ACME.DelayDontCheckDNS} - } - if gc.ACME.OnDemand { log.Warn("ACME.OnDemand is deprecated") } @@ -296,20 +279,7 @@ func (gc *GlobalConfiguration) InitACMEProvider() (*acmeprovider.Provider, error // If provider file, use Provider ACME instead of ACME if gc.Cluster == nil { provider := &acmeprovider.Provider{} - provider.Configuration = &acmeprovider.Configuration{ - KeyType: gc.ACME.KeyType, - OnHostRule: gc.ACME.OnHostRule, - OnDemand: gc.ACME.OnDemand, - Email: gc.ACME.Email, - Storage: gc.ACME.Storage, - HTTPChallenge: gc.ACME.HTTPChallenge, - DNSChallenge: gc.ACME.DNSChallenge, - TLSChallenge: gc.ACME.TLSChallenge, - Domains: gc.ACME.Domains, - ACMELogging: gc.ACME.ACMELogging, - CAServer: gc.ACME.CAServer, - EntryPoint: gc.ACME.EntryPoint, - } + provider.Configuration = convertACMEChallenge(gc.ACME) store := acmeprovider.NewLocalStore(provider.Storage) provider.Store = store @@ -429,3 +399,47 @@ type HostResolverConfig struct { ResolvConfig string `description:"resolv.conf used for DNS resolving" export:"true"` ResolvDepth int `description:"The maximal depth of DNS recursive resolving" export:"true"` } + +// Deprecated +func convertACMEChallenge(oldACMEChallenge *acme.ACME) *acmeprovider.Configuration { + conf := &acmeprovider.Configuration{ + KeyType: oldACMEChallenge.KeyType, + OnHostRule: oldACMEChallenge.OnHostRule, + OnDemand: oldACMEChallenge.OnDemand, + Email: oldACMEChallenge.Email, + Storage: oldACMEChallenge.Storage, + ACMELogging: oldACMEChallenge.ACMELogging, + CAServer: oldACMEChallenge.CAServer, + EntryPoint: oldACMEChallenge.EntryPoint, + } + + for _, domain := range oldACMEChallenge.Domains { + if domain.Main != lego.UnFqdn(domain.Main) { + log.Warnf("FQDN detected, please remove the trailing dot: %s", domain.Main) + } + for _, san := range domain.SANs { + if san != lego.UnFqdn(san) { + log.Warnf("FQDN detected, please remove the trailing dot: %s", san) + } + } + conf.Domains = append(conf.Domains, newtypes.Domain(domain)) + } + if oldACMEChallenge.HTTPChallenge != nil { + conf.HTTPChallenge = &acmeprovider.HTTPChallenge{ + EntryPoint: oldACMEChallenge.HTTPChallenge.EntryPoint, + } + } + + if oldACMEChallenge.DNSChallenge != nil { + conf.DNSChallenge = &acmeprovider.DNSChallenge{ + Provider: oldACMEChallenge.DNSChallenge.Provider, + 
DelayBeforeCheck: oldACMEChallenge.DNSChallenge.DelayBeforeCheck, + } + } + + if oldACMEChallenge.TLSChallenge != nil { + conf.TLSChallenge = &acmeprovider.TLSChallenge{} + } + + return conf +} diff --git a/configuration/configuration_test.go b/old/configuration/configuration_test.go similarity index 94% rename from configuration/configuration_test.go rename to old/configuration/configuration_test.go index f41a73fbd..e3e421106 100644 --- a/configuration/configuration_test.go +++ b/old/configuration/configuration_test.go @@ -4,12 +4,12 @@ import ( "testing" "github.com/containous/traefik/acme" - "github.com/containous/traefik/middlewares/tracing" - "github.com/containous/traefik/middlewares/tracing/jaeger" - "github.com/containous/traefik/middlewares/tracing/zipkin" - "github.com/containous/traefik/provider" - acmeprovider "github.com/containous/traefik/provider/acme" - "github.com/containous/traefik/provider/file" + "github.com/containous/traefik/old/middlewares/tracing" + "github.com/containous/traefik/old/middlewares/tracing/jaeger" + "github.com/containous/traefik/old/middlewares/tracing/zipkin" + "github.com/containous/traefik/old/provider" + acmeprovider "github.com/containous/traefik/old/provider/acme" + "github.com/containous/traefik/old/provider/file" "github.com/stretchr/testify/assert" ) diff --git a/configuration/entrypoints.go b/old/configuration/entrypoints.go similarity index 99% rename from configuration/entrypoints.go rename to old/configuration/entrypoints.go index 2acb1ae93..3770eae01 100644 --- a/configuration/entrypoints.go +++ b/old/configuration/entrypoints.go @@ -5,9 +5,9 @@ import ( "strconv" "strings" - "github.com/containous/traefik/log" + "github.com/containous/traefik/old/log" + "github.com/containous/traefik/old/types" "github.com/containous/traefik/tls" - "github.com/containous/traefik/types" ) // EntryPoint holds an entry point configuration of the reverse proxy (ip, port, TLS...) 
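For readers tracking the migration, here is a minimal illustrative sketch, not part of this patch, of how the deprecated static ACME block is expected to be funneled through convertACMEChallenge defined above in old/configuration/configuration.go. Since convertACMEChallenge is unexported, such a snippet would have to live in the same package; the helper name exampleConvertACME and all field values are hypothetical.

package configuration

import (
	"fmt"

	"github.com/containous/traefik/acme"
)

// exampleConvertACME shows the deprecated acme.ACME settings being mapped onto
// the new acmeprovider.Configuration via convertACMEChallenge (sketch only).
func exampleConvertACME() {
	// Legacy ACME block as it would be decoded from the static configuration (values are made up).
	legacy := &acme.ACME{
		Email:      "admin@example.com",
		Storage:    "acme.json",
		EntryPoint: "https",
		OnHostRule: true,
	}

	// Domains and the HTTP-01 / DNS-01 / TLS-ALPN-01 challenge blocks, when set,
	// are copied over inside convertACMEChallenge; trailing-dot FQDNs only trigger a warning.
	conf := convertACMEChallenge(legacy)

	fmt.Println(conf.Email, conf.Storage, conf.EntryPoint, conf.OnHostRule)
}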
diff --git a/configuration/entrypoints_test.go b/old/configuration/entrypoints_test.go similarity index 99% rename from configuration/entrypoints_test.go rename to old/configuration/entrypoints_test.go index a49072600..cf4e74fd0 100644 --- a/configuration/entrypoints_test.go +++ b/old/configuration/entrypoints_test.go @@ -3,8 +3,8 @@ package configuration import ( "testing" + "github.com/containous/traefik/old/types" "github.com/containous/traefik/tls" - "github.com/containous/traefik/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/configuration/provider_aggregator.go b/old/configuration/provider_aggregator.go similarity index 93% rename from configuration/provider_aggregator.go rename to old/configuration/provider_aggregator.go index ce36dc384..532f1a5ac 100644 --- a/configuration/provider_aggregator.go +++ b/old/configuration/provider_aggregator.go @@ -3,10 +3,10 @@ package configuration import ( "encoding/json" - "github.com/containous/traefik/log" - "github.com/containous/traefik/provider" + "github.com/containous/traefik/old/log" + "github.com/containous/traefik/old/provider" + "github.com/containous/traefik/old/types" "github.com/containous/traefik/safe" - "github.com/containous/traefik/types" ) // ProviderAggregator aggregate providers @@ -65,9 +65,7 @@ func NewProviderAggregator(gc *GlobalConfiguration) ProviderAggregator { if gc.DynamoDB != nil { provider.quietAddProvider(gc.DynamoDB) } - if gc.ServiceFabric != nil { - provider.quietAddProvider(gc.ServiceFabric) - } + return provider } diff --git a/configuration/router/internal_router.go b/old/configuration/router/internal_router.go similarity index 73% rename from configuration/router/internal_router.go rename to old/configuration/router/internal_router.go index 8c6777fd5..4a4141eb2 100644 --- a/configuration/router/internal_router.go +++ b/old/configuration/router/internal_router.go @@ -2,12 +2,11 @@ package router import ( "github.com/containous/mux" - "github.com/containous/traefik/configuration" - "github.com/containous/traefik/log" - "github.com/containous/traefik/metrics" - "github.com/containous/traefik/middlewares" - mauth "github.com/containous/traefik/middlewares/auth" - "github.com/containous/traefik/types" + "github.com/containous/traefik/old/configuration" + "github.com/containous/traefik/old/log" + "github.com/containous/traefik/old/middlewares" + mauth "github.com/containous/traefik/old/middlewares/auth" + "github.com/containous/traefik/old/types" "github.com/urfave/negroni" ) @@ -44,32 +43,29 @@ func NewInternalRouterAggregator(globalConfiguration configuration.GlobalConfigu } router := InternalRouterAggregator{} - routerWithPrefix := InternalRouterAggregator{} - routerWithPrefixAndMiddleware := InternalRouterAggregator{} + routerWithMiddleware := InternalRouterAggregator{} if globalConfiguration.Metrics != nil && globalConfiguration.Metrics.Prometheus != nil && globalConfiguration.Metrics.Prometheus.EntryPoint == entryPointName { - routerWithPrefixAndMiddleware.AddRouter(metrics.PrometheusHandler{}) + // routerWithMiddleware.AddRouter(metrics.PrometheusHandler{}) } if globalConfiguration.Rest != nil && globalConfiguration.Rest.EntryPoint == entryPointName { - routerWithPrefixAndMiddleware.AddRouter(globalConfiguration.Rest) + routerWithMiddleware.AddRouter(globalConfiguration.Rest) } if globalConfiguration.API != nil && globalConfiguration.API.EntryPoint == entryPointName { - routerWithPrefixAndMiddleware.AddRouter(globalConfiguration.API) + 
routerWithMiddleware.AddRouter(globalConfiguration.API) } if globalConfiguration.Ping != nil && globalConfiguration.Ping.EntryPoint == entryPointName { - routerWithPrefix.AddRouter(globalConfiguration.Ping) + router.AddRouter(globalConfiguration.Ping) } if globalConfiguration.ACME != nil && globalConfiguration.ACME.HTTPChallenge != nil && globalConfiguration.ACME.HTTPChallenge.EntryPoint == entryPointName { router.AddRouter(globalConfiguration.ACME) } - realRouterWithMiddleware := WithMiddleware{router: &routerWithPrefixAndMiddleware, routerMiddlewares: serverMiddlewares} - router.AddRouter(&routerWithPrefix) - router.AddRouter(&realRouterWithMiddleware) + router.AddRouter(&WithMiddleware{router: &routerWithMiddleware, routerMiddlewares: serverMiddlewares}) return &router } @@ -93,23 +89,6 @@ func (wm *WithMiddleware) AddRoutes(systemRouter *mux.Router) { } } -// WithPrefix router which add a prefix -type WithPrefix struct { - Router types.InternalRouter - PathPrefix string -} - -// AddRoutes Add routes to the router -func (wp *WithPrefix) AddRoutes(systemRouter *mux.Router) { - realRouter := systemRouter.PathPrefix("/").Subrouter() - if wp.PathPrefix != "" { - realRouter = systemRouter.PathPrefix(wp.PathPrefix).Subrouter() - realRouter.StrictSlash(true) - realRouter.SkipClean(true) - } - wp.Router.AddRoutes(realRouter) -} - // InternalRouterAggregator InternalRouter that aggregate other internalRouter type InternalRouterAggregator struct { internalRouters []types.InternalRouter diff --git a/configuration/router/internal_router_test.go b/old/configuration/router/internal_router_test.go similarity index 70% rename from configuration/router/internal_router_test.go rename to old/configuration/router/internal_router_test.go index 4c726a6e2..42e92dd4c 100644 --- a/configuration/router/internal_router_test.go +++ b/old/configuration/router/internal_router_test.go @@ -7,13 +7,13 @@ import ( "github.com/containous/mux" "github.com/containous/traefik/acme" - "github.com/containous/traefik/api" - "github.com/containous/traefik/configuration" - "github.com/containous/traefik/log" - "github.com/containous/traefik/ping" - acmeprovider "github.com/containous/traefik/provider/acme" + "github.com/containous/traefik/old/api" + "github.com/containous/traefik/old/configuration" + "github.com/containous/traefik/old/log" + "github.com/containous/traefik/old/ping" + acmeprovider "github.com/containous/traefik/old/provider/acme" + "github.com/containous/traefik/old/types" "github.com/containous/traefik/safe" - "github.com/containous/traefik/types" "github.com/stretchr/testify/assert" "github.com/urfave/negroni" ) @@ -149,55 +149,3 @@ func TestWithMiddleware(t *testing.T) { assert.Equal(t, "before middleware1|before middleware2|router|after middleware2|after middleware1", obtained) } - -func TestWithPrefix(t *testing.T) { - testCases := []struct { - desc string - prefix string - testedURL string - expectedStatusCode int - }{ - { - desc: "No prefix", - testedURL: "/test", - expectedStatusCode: 200, - }, - { - desc: "With prefix and wrong url", - prefix: "/prefix", - testedURL: "/test", - expectedStatusCode: 404, - }, - { - desc: "With prefix", - prefix: "/prefix", - testedURL: "/prefix/test", - expectedStatusCode: 200, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - router := WithPrefix{ - Router: MockInternalRouterFunc(func(systemRouter *mux.Router) { - systemRouter.Handle("/test", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) 
{ - w.WriteHeader(http.StatusOK) - })) - }), - - PathPrefix: test.prefix, - } - internalMuxRouter := mux.NewRouter() - router.AddRoutes(internalMuxRouter) - - recorder := httptest.NewRecorder() - request := httptest.NewRequest(http.MethodGet, test.testedURL, nil) - internalMuxRouter.ServeHTTP(recorder, request) - - assert.Equal(t, test.expectedStatusCode, recorder.Code) - }) - } -} diff --git a/log/logger.go b/old/log/logger.go similarity index 100% rename from log/logger.go rename to old/log/logger.go diff --git a/log/logger_test.go b/old/log/logger_test.go similarity index 100% rename from log/logger_test.go rename to old/log/logger_test.go diff --git a/old/middlewares/accesslog/capture_request_reader.go b/old/middlewares/accesslog/capture_request_reader.go new file mode 100644 index 000000000..4fd0088b1 --- /dev/null +++ b/old/middlewares/accesslog/capture_request_reader.go @@ -0,0 +1,18 @@ +package accesslog + +import "io" + +type captureRequestReader struct { + source io.ReadCloser + count int64 +} + +func (r *captureRequestReader) Read(p []byte) (int, error) { + n, err := r.source.Read(p) + r.count += int64(n) + return n, err +} + +func (r *captureRequestReader) Close() error { + return r.source.Close() +} diff --git a/old/middlewares/accesslog/capture_response_writer.go b/old/middlewares/accesslog/capture_response_writer.go new file mode 100644 index 000000000..58fd368c4 --- /dev/null +++ b/old/middlewares/accesslog/capture_response_writer.go @@ -0,0 +1,68 @@ +package accesslog + +import ( + "bufio" + "fmt" + "net" + "net/http" + + "github.com/containous/traefik/old/middlewares" +) + +var ( + _ middlewares.Stateful = &captureResponseWriter{} +) + +// captureResponseWriter is a wrapper of type http.ResponseWriter +// that tracks request status and size +type captureResponseWriter struct { + rw http.ResponseWriter + status int + size int64 +} + +func (crw *captureResponseWriter) Header() http.Header { + return crw.rw.Header() +} + +func (crw *captureResponseWriter) Write(b []byte) (int, error) { + if crw.status == 0 { + crw.status = http.StatusOK + } + size, err := crw.rw.Write(b) + crw.size += int64(size) + return size, err +} + +func (crw *captureResponseWriter) WriteHeader(s int) { + crw.rw.WriteHeader(s) + crw.status = s +} + +func (crw *captureResponseWriter) Flush() { + if f, ok := crw.rw.(http.Flusher); ok { + f.Flush() + } +} + +func (crw *captureResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { + if h, ok := crw.rw.(http.Hijacker); ok { + return h.Hijack() + } + return nil, nil, fmt.Errorf("not a hijacker: %T", crw.rw) +} + +func (crw *captureResponseWriter) CloseNotify() <-chan bool { + if c, ok := crw.rw.(http.CloseNotifier); ok { + return c.CloseNotify() + } + return nil +} + +func (crw *captureResponseWriter) Status() int { + return crw.status +} + +func (crw *captureResponseWriter) Size() int64 { + return crw.size +} diff --git a/old/middlewares/accesslog/logdata.go b/old/middlewares/accesslog/logdata.go new file mode 100644 index 000000000..e20e753fe --- /dev/null +++ b/old/middlewares/accesslog/logdata.go @@ -0,0 +1,120 @@ +package accesslog + +import ( + "net/http" +) + +const ( + // StartUTC is the map key used for the time at which request processing started. + StartUTC = "StartUTC" + // StartLocal is the map key used for the local time at which request processing started. 
+ StartLocal = "StartLocal" + // Duration is the map key used for the total time taken by processing the response, including the origin server's time but + // not the log writing time. + Duration = "Duration" + // FrontendName is the map key used for the name of the Traefik frontend. + FrontendName = "FrontendName" + // BackendName is the map key used for the name of the Traefik backend. + BackendName = "BackendName" + // BackendURL is the map key used for the URL of the Traefik backend. + BackendURL = "BackendURL" + // BackendAddr is the map key used for the IP:port of the Traefik backend (extracted from BackendURL) + BackendAddr = "BackendAddr" + // ClientAddr is the map key used for the remote address in its original form (usually IP:port). + ClientAddr = "ClientAddr" + // ClientHost is the map key used for the remote IP address from which the client request was received. + ClientHost = "ClientHost" + // ClientPort is the map key used for the remote TCP port from which the client request was received. + ClientPort = "ClientPort" + // ClientUsername is the map key used for the username provided in the URL, if present. + ClientUsername = "ClientUsername" + // RequestAddr is the map key used for the HTTP Host header (usually IP:port). This is treated as not a header by the Go API. + RequestAddr = "RequestAddr" + // RequestHost is the map key used for the HTTP Host server name (not including port). + RequestHost = "RequestHost" + // RequestPort is the map key used for the TCP port from the HTTP Host. + RequestPort = "RequestPort" + // RequestMethod is the map key used for the HTTP method. + RequestMethod = "RequestMethod" + // RequestPath is the map key used for the HTTP request URI, not including the scheme, host or port. + RequestPath = "RequestPath" + // RequestProtocol is the map key used for the version of HTTP requested. + RequestProtocol = "RequestProtocol" + // RequestContentSize is the map key used for the number of bytes in the request entity (a.k.a. body) sent by the client. + RequestContentSize = "RequestContentSize" + // RequestRefererHeader is the Referer header in the request + RequestRefererHeader = "request_Referer" + // RequestUserAgentHeader is the User-Agent header in the request + RequestUserAgentHeader = "request_User-Agent" + // OriginDuration is the map key used for the time taken by the origin server ('upstream') to return its response. + OriginDuration = "OriginDuration" + // OriginContentSize is the map key used for the content length specified by the origin server, or 0 if unspecified. + OriginContentSize = "OriginContentSize" + // OriginStatus is the map key used for the HTTP status code returned by the origin server. + // If the request was handled by this Traefik instance (e.g. with a redirect), then this value will be absent. + OriginStatus = "OriginStatus" + // DownstreamStatus is the map key used for the HTTP status code returned to the client. + DownstreamStatus = "DownstreamStatus" + // DownstreamContentSize is the map key used for the number of bytes in the response entity returned to the client. + // This is in addition to the "Content-Length" header, which may be present in the origin response. + DownstreamContentSize = "DownstreamContentSize" + // RequestCount is the map key used for the number of requests received since the Traefik instance started. + RequestCount = "RequestCount" + // GzipRatio is the map key used for the response body compression ratio achieved. 
+ GzipRatio = "GzipRatio" + // Overhead is the map key used for the processing time overhead caused by Traefik. + Overhead = "Overhead" + // RetryAttempts is the map key used for the amount of attempts the request was retried. + RetryAttempts = "RetryAttempts" +) + +// These are written out in the default case when no config is provided to specify keys of interest. +var defaultCoreKeys = [...]string{ + StartUTC, + Duration, + FrontendName, + BackendName, + BackendURL, + ClientHost, + ClientPort, + ClientUsername, + RequestHost, + RequestPort, + RequestMethod, + RequestPath, + RequestProtocol, + RequestContentSize, + OriginDuration, + OriginContentSize, + OriginStatus, + DownstreamStatus, + DownstreamContentSize, + RequestCount, +} + +// This contains the set of all keys, i.e. all the default keys plus all non-default keys. +var allCoreKeys = make(map[string]struct{}) + +func init() { + for _, k := range defaultCoreKeys { + allCoreKeys[k] = struct{}{} + } + allCoreKeys[BackendAddr] = struct{}{} + allCoreKeys[ClientAddr] = struct{}{} + allCoreKeys[RequestAddr] = struct{}{} + allCoreKeys[GzipRatio] = struct{}{} + allCoreKeys[StartLocal] = struct{}{} + allCoreKeys[Overhead] = struct{}{} + allCoreKeys[RetryAttempts] = struct{}{} +} + +// CoreLogData holds the fields computed from the request/response. +type CoreLogData map[string]interface{} + +// LogData is the data captured by the middleware so that it can be logged. +type LogData struct { + Core CoreLogData + Request http.Header + OriginResponse http.Header + DownstreamResponse http.Header +} diff --git a/old/middlewares/accesslog/logger.go b/old/middlewares/accesslog/logger.go new file mode 100644 index 000000000..9483e661b --- /dev/null +++ b/old/middlewares/accesslog/logger.go @@ -0,0 +1,334 @@ +package accesslog + +import ( + "context" + "fmt" + "net" + "net/http" + "net/url" + "os" + "path/filepath" + "sync" + "sync/atomic" + "time" + + "github.com/containous/flaeg/parse" + "github.com/containous/traefik/old/log" + "github.com/containous/traefik/old/types" + "github.com/sirupsen/logrus" +) + +type key string + +const ( + // DataTableKey is the key within the request context used to + // store the Log Data Table + DataTableKey key = "LogDataTable" + + // CommonFormat is the common logging format (CLF) + CommonFormat string = "common" + + // JSONFormat is the JSON logging format + JSONFormat string = "json" +) + +type logHandlerParams struct { + logDataTable *LogData + crr *captureRequestReader + crw *captureResponseWriter +} + +// LogHandler will write each request and its response to the access log. 
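+// When BufferingSize is greater than zero, entries are queued on an internal channel and written asynchronously by a background goroutine; otherwise each entry is written synchronously.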
+type LogHandler struct { + config *types.AccessLog + logger *logrus.Logger + file *os.File + mu sync.Mutex + httpCodeRanges types.HTTPCodeRanges + logHandlerChan chan logHandlerParams + wg sync.WaitGroup +} + +// NewLogHandler creates a new LogHandler +func NewLogHandler(config *types.AccessLog) (*LogHandler, error) { + file := os.Stdout + if len(config.FilePath) > 0 { + f, err := openAccessLogFile(config.FilePath) + if err != nil { + return nil, fmt.Errorf("error opening access log file: %s", err) + } + file = f + } + logHandlerChan := make(chan logHandlerParams, config.BufferingSize) + + var formatter logrus.Formatter + + switch config.Format { + case CommonFormat: + formatter = new(CommonLogFormatter) + case JSONFormat: + formatter = new(logrus.JSONFormatter) + default: + return nil, fmt.Errorf("unsupported access log format: %s", config.Format) + } + + logger := &logrus.Logger{ + Out: file, + Formatter: formatter, + Hooks: make(logrus.LevelHooks), + Level: logrus.InfoLevel, + } + + logHandler := &LogHandler{ + config: config, + logger: logger, + file: file, + logHandlerChan: logHandlerChan, + } + + if config.Filters != nil { + if httpCodeRanges, err := types.NewHTTPCodeRanges(config.Filters.StatusCodes); err != nil { + log.Errorf("Failed to create new HTTP code ranges: %s", err) + } else { + logHandler.httpCodeRanges = httpCodeRanges + } + } + + if config.BufferingSize > 0 { + logHandler.wg.Add(1) + go func() { + defer logHandler.wg.Done() + for handlerParams := range logHandler.logHandlerChan { + logHandler.logTheRoundTrip(handlerParams.logDataTable, handlerParams.crr, handlerParams.crw) + } + }() + } + + return logHandler, nil +} + +func openAccessLogFile(filePath string) (*os.File, error) { + dir := filepath.Dir(filePath) + + if err := os.MkdirAll(dir, 0755); err != nil { + return nil, fmt.Errorf("failed to create log path %s: %s", dir, err) + } + + file, err := os.OpenFile(filePath, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664) + if err != nil { + return nil, fmt.Errorf("error opening file %s: %s", filePath, err) + } + + return file, nil +} + +// GetLogDataTable gets the request context object that contains logging data. +// This creates data as the request passes through the middleware chain. 
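+// If the request context does not carry a data table, an error is logged and an empty LogData is returned so that callers can still record fields.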
+func GetLogDataTable(req *http.Request) *LogData { + if ld, ok := req.Context().Value(DataTableKey).(*LogData); ok { + return ld + } + log.Errorf("%s is nil", DataTableKey) + return &LogData{Core: make(CoreLogData)} +} + +func (l *LogHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request, next http.HandlerFunc) { + now := time.Now().UTC() + + core := CoreLogData{ + StartUTC: now, + StartLocal: now.Local(), + } + + logDataTable := &LogData{Core: core, Request: req.Header} + + reqWithDataTable := req.WithContext(context.WithValue(req.Context(), DataTableKey, logDataTable)) + + var crr *captureRequestReader + if req.Body != nil { + crr = &captureRequestReader{source: req.Body, count: 0} + reqWithDataTable.Body = crr + } + + core[RequestCount] = nextRequestCount() + if req.Host != "" { + core[RequestAddr] = req.Host + core[RequestHost], core[RequestPort] = silentSplitHostPort(req.Host) + } + // copy the URL without the scheme, hostname etc + urlCopy := &url.URL{ + Path: req.URL.Path, + RawPath: req.URL.RawPath, + RawQuery: req.URL.RawQuery, + ForceQuery: req.URL.ForceQuery, + Fragment: req.URL.Fragment, + } + urlCopyString := urlCopy.String() + core[RequestMethod] = req.Method + core[RequestPath] = urlCopyString + core[RequestProtocol] = req.Proto + + core[ClientAddr] = req.RemoteAddr + core[ClientHost], core[ClientPort] = silentSplitHostPort(req.RemoteAddr) + + if forwardedFor := req.Header.Get("X-Forwarded-For"); forwardedFor != "" { + core[ClientHost] = forwardedFor + } + + crw := &captureResponseWriter{rw: rw} + + next.ServeHTTP(crw, reqWithDataTable) + + core[ClientUsername] = formatUsernameForLog(core[ClientUsername]) + + logDataTable.DownstreamResponse = crw.Header() + + if l.config.BufferingSize > 0 { + l.logHandlerChan <- logHandlerParams{ + logDataTable: logDataTable, + crr: crr, + crw: crw, + } + } else { + l.logTheRoundTrip(logDataTable, crr, crw) + } +} + +// Close closes the Logger (i.e. the file, drain logHandlerChan, etc). +func (l *LogHandler) Close() error { + close(l.logHandlerChan) + l.wg.Wait() + return l.file.Close() +} + +// Rotate closes and reopens the log file to allow for rotation +// by an external source. +func (l *LogHandler) Rotate() error { + var err error + + if l.file != nil { + defer func(f *os.File) { + f.Close() + }(l.file) + } + + l.file, err = os.OpenFile(l.config.FilePath, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664) + if err != nil { + return err + } + l.mu.Lock() + defer l.mu.Unlock() + l.logger.Out = l.file + return nil +} + +func silentSplitHostPort(value string) (host string, port string) { + host, port, err := net.SplitHostPort(value) + if err != nil { + return value, "-" + } + return host, port +} + +func formatUsernameForLog(usernameField interface{}) string { + username, ok := usernameField.(string) + if ok && len(username) != 0 { + return username + } + return "-" +} + +// Logging handler to log frontend name, backend name, and elapsed time +func (l *LogHandler) logTheRoundTrip(logDataTable *LogData, crr *captureRequestReader, crw *captureResponseWriter) { + core := logDataTable.Core + + retryAttempts, ok := core[RetryAttempts].(int) + if !ok { + retryAttempts = 0 + } + core[RetryAttempts] = retryAttempts + + if crr != nil { + core[RequestContentSize] = crr.count + } + + core[DownstreamStatus] = crw.Status() + + // n.b. 
take care to perform time arithmetic using UTC to avoid errors at DST boundaries + totalDuration := time.Now().UTC().Sub(core[StartUTC].(time.Time)) + core[Duration] = totalDuration + + if l.keepAccessLog(crw.Status(), retryAttempts, totalDuration) { + core[DownstreamContentSize] = crw.Size() + if original, ok := core[OriginContentSize]; ok { + o64 := original.(int64) + if o64 != crw.Size() && 0 != crw.Size() { + core[GzipRatio] = float64(o64) / float64(crw.Size()) + } + } + + core[Overhead] = totalDuration + if origin, ok := core[OriginDuration]; ok { + core[Overhead] = totalDuration - origin.(time.Duration) + } + + fields := logrus.Fields{} + + for k, v := range logDataTable.Core { + if l.config.Fields.Keep(k) { + fields[k] = v + } + } + + l.redactHeaders(logDataTable.Request, fields, "request_") + l.redactHeaders(logDataTable.OriginResponse, fields, "origin_") + l.redactHeaders(logDataTable.DownstreamResponse, fields, "downstream_") + + l.mu.Lock() + defer l.mu.Unlock() + l.logger.WithFields(fields).Println() + } +} + +func (l *LogHandler) redactHeaders(headers http.Header, fields logrus.Fields, prefix string) { + for k := range headers { + v := l.config.Fields.KeepHeader(k) + if v == types.AccessLogKeep { + fields[prefix+k] = headers.Get(k) + } else if v == types.AccessLogRedact { + fields[prefix+k] = "REDACTED" + } + } +} + +func (l *LogHandler) keepAccessLog(statusCode, retryAttempts int, duration time.Duration) bool { + if l.config.Filters == nil { + // no filters were specified + return true + } + + if len(l.httpCodeRanges) == 0 && !l.config.Filters.RetryAttempts && l.config.Filters.MinDuration == 0 { + // empty filters were specified, e.g. by passing --accessLog.filters only (without other filter options) + return true + } + + if l.httpCodeRanges.Contains(statusCode) { + return true + } + + if l.config.Filters.RetryAttempts && retryAttempts > 0 { + return true + } + + if l.config.Filters.MinDuration > 0 && (parse.Duration(duration) > l.config.Filters.MinDuration) { + return true + } + + return false +} + +var requestCounter uint64 // Request ID + +func nextRequestCount() uint64 { + return atomic.AddUint64(&requestCounter, 1) +} diff --git a/old/middlewares/accesslog/logger_formatters.go b/old/middlewares/accesslog/logger_formatters.go new file mode 100644 index 000000000..4755079fe --- /dev/null +++ b/old/middlewares/accesslog/logger_formatters.go @@ -0,0 +1,82 @@ +package accesslog + +import ( + "bytes" + "fmt" + "time" + + "github.com/sirupsen/logrus" +) + +// default format for time presentation +const ( + commonLogTimeFormat = "02/Jan/2006:15:04:05 -0700" + defaultValue = "-" +) + +// CommonLogFormatter provides formatting in the Traefik common log format +type CommonLogFormatter struct{} + +// Format formats the log entry in the Traefik common log format +func (f *CommonLogFormatter) Format(entry *logrus.Entry) ([]byte, error) { + b := &bytes.Buffer{} + + var timestamp = defaultValue + if v, ok := entry.Data[StartUTC]; ok { + timestamp = v.(time.Time).Format(commonLogTimeFormat) + } + + var elapsedMillis int64 + if v, ok := entry.Data[Duration]; ok { + elapsedMillis = v.(time.Duration).Nanoseconds() / 1000000 + } + + _, err := fmt.Fprintf(b, "%s - %s [%s] \"%s %s %s\" %v %v %s %s %v %s %s %dms\n", + toLog(entry.Data, ClientHost, defaultValue, false), + toLog(entry.Data, ClientUsername, defaultValue, false), + timestamp, + toLog(entry.Data, RequestMethod, defaultValue, false), + toLog(entry.Data, RequestPath, defaultValue, false), + toLog(entry.Data, RequestProtocol, 
defaultValue, false), + toLog(entry.Data, OriginStatus, defaultValue, true), + toLog(entry.Data, OriginContentSize, defaultValue, true), + toLog(entry.Data, "request_Referer", `"-"`, true), + toLog(entry.Data, "request_User-Agent", `"-"`, true), + toLog(entry.Data, RequestCount, defaultValue, true), + toLog(entry.Data, FrontendName, defaultValue, true), + toLog(entry.Data, BackendURL, defaultValue, true), + elapsedMillis) + + return b.Bytes(), err +} + +func toLog(fields logrus.Fields, key string, defaultValue string, quoted bool) interface{} { + if v, ok := fields[key]; ok { + if v == nil { + return defaultValue + } + + switch s := v.(type) { + case string: + return toLogEntry(s, defaultValue, quoted) + + case fmt.Stringer: + return toLogEntry(s.String(), defaultValue, quoted) + + default: + return v + } + } + return defaultValue + +} +func toLogEntry(s string, defaultValue string, quote bool) string { + if len(s) == 0 { + return defaultValue + } + + if quote { + return `"` + s + `"` + } + return s +} diff --git a/old/middlewares/accesslog/logger_formatters_test.go b/old/middlewares/accesslog/logger_formatters_test.go new file mode 100644 index 000000000..22b68da58 --- /dev/null +++ b/old/middlewares/accesslog/logger_formatters_test.go @@ -0,0 +1,140 @@ +package accesslog + +import ( + "net/http" + "testing" + "time" + + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" +) + +func TestCommonLogFormatter_Format(t *testing.T) { + clf := CommonLogFormatter{} + + testCases := []struct { + name string + data map[string]interface{} + expectedLog string + }{ + { + name: "OriginStatus & OriginContentSize are nil", + data: map[string]interface{}{ + StartUTC: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), + Duration: 123 * time.Second, + ClientHost: "10.0.0.1", + ClientUsername: "Client", + RequestMethod: http.MethodGet, + RequestPath: "/foo", + RequestProtocol: "http", + OriginStatus: nil, + OriginContentSize: nil, + RequestRefererHeader: "", + RequestUserAgentHeader: "", + RequestCount: 0, + FrontendName: "", + BackendURL: "", + }, + expectedLog: `10.0.0.1 - Client [10/Nov/2009:23:00:00 +0000] "GET /foo http" - - "-" "-" 0 - - 123000ms +`, + }, + { + name: "all data", + data: map[string]interface{}{ + StartUTC: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), + Duration: 123 * time.Second, + ClientHost: "10.0.0.1", + ClientUsername: "Client", + RequestMethod: http.MethodGet, + RequestPath: "/foo", + RequestProtocol: "http", + OriginStatus: 123, + OriginContentSize: 132, + RequestRefererHeader: "referer", + RequestUserAgentHeader: "agent", + RequestCount: nil, + FrontendName: "foo", + BackendURL: "http://10.0.0.2/toto", + }, + expectedLog: `10.0.0.1 - Client [10/Nov/2009:23:00:00 +0000] "GET /foo http" 123 132 "referer" "agent" - "foo" "http://10.0.0.2/toto" 123000ms +`, + }, + } + + for _, test := range testCases { + test := test + t.Run(test.name, func(t *testing.T) { + t.Parallel() + + entry := &logrus.Entry{Data: test.data} + + raw, err := clf.Format(entry) + assert.NoError(t, err) + + assert.Equal(t, test.expectedLog, string(raw)) + }) + } + +} + +func Test_toLog(t *testing.T) { + + testCases := []struct { + desc string + fields logrus.Fields + fieldName string + defaultValue string + quoted bool + expectedLog interface{} + }{ + { + desc: "Should return int 1", + fields: logrus.Fields{ + "Powpow": 1, + }, + fieldName: "Powpow", + defaultValue: defaultValue, + quoted: false, + expectedLog: 1, + }, + { + desc: "Should return string foo", + fields: 
logrus.Fields{ + "Powpow": "foo", + }, + fieldName: "Powpow", + defaultValue: defaultValue, + quoted: true, + expectedLog: `"foo"`, + }, + { + desc: "Should return defaultValue if fieldName does not exist", + fields: logrus.Fields{ + "Powpow": "foo", + }, + fieldName: "", + defaultValue: defaultValue, + quoted: false, + expectedLog: "-", + }, + { + desc: "Should return defaultValue if fields is nil", + fields: nil, + fieldName: "", + defaultValue: defaultValue, + quoted: false, + expectedLog: "-", + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + lg := toLog(test.fields, test.fieldName, defaultValue, test.quoted) + + assert.Equal(t, test.expectedLog, lg) + }) + } +} diff --git a/old/middlewares/accesslog/logger_test.go b/old/middlewares/accesslog/logger_test.go new file mode 100644 index 000000000..35cbf9280 --- /dev/null +++ b/old/middlewares/accesslog/logger_test.go @@ -0,0 +1,644 @@ +package accesslog + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "os" + "path/filepath" + "regexp" + "strings" + "testing" + "time" + + "github.com/containous/flaeg/parse" + "github.com/containous/traefik/old/log" + "github.com/containous/traefik/old/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + logFileNameSuffix = "/traefik/logger/test.log" + testContent = "Hello, World" + testBackendName = "http://127.0.0.1/testBackend" + testFrontendName = "testFrontend" + testStatus = 123 + testContentSize int64 = 12 + testHostname = "TestHost" + testUsername = "TestUser" + testPath = "testpath" + testPort = 8181 + testProto = "HTTP/0.0" + testMethod = http.MethodPost + testReferer = "testReferer" + testUserAgent = "testUserAgent" + testRetryAttempts = 2 + testStart = time.Now() +) + +func TestLogRotation(t *testing.T) { + tempDir, err := ioutil.TempDir("", "traefik_") + if err != nil { + t.Fatalf("Error setting up temporary directory: %s", err) + } + + fileName := tempDir + "traefik.log" + rotatedFileName := fileName + ".rotated" + + config := &types.AccessLog{FilePath: fileName, Format: CommonFormat} + logHandler, err := NewLogHandler(config) + if err != nil { + t.Fatalf("Error creating new log handler: %s", err) + } + defer logHandler.Close() + + recorder := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, "http://localhost", nil) + next := func(rw http.ResponseWriter, req *http.Request) { + rw.WriteHeader(http.StatusOK) + } + + iterations := 20 + halfDone := make(chan bool) + writeDone := make(chan bool) + go func() { + for i := 0; i < iterations; i++ { + logHandler.ServeHTTP(recorder, req, next) + if i == iterations/2 { + halfDone <- true + } + } + writeDone <- true + }() + + <-halfDone + err = os.Rename(fileName, rotatedFileName) + if err != nil { + t.Fatalf("Error renaming file: %s", err) + } + + err = logHandler.Rotate() + if err != nil { + t.Fatalf("Error rotating file: %s", err) + } + + select { + case <-writeDone: + gotLineCount := lineCount(t, fileName) + lineCount(t, rotatedFileName) + if iterations != gotLineCount { + t.Errorf("Wanted %d written log lines, got %d", iterations, gotLineCount) + } + case <-time.After(500 * time.Millisecond): + t.Fatalf("test timed out") + } + + close(halfDone) + close(writeDone) +} + +func lineCount(t *testing.T, fileName string) int { + t.Helper() + fileContents, err := ioutil.ReadFile(fileName) + if err != nil { + t.Fatalf("Error reading from file %s: %s", fileName, err) + } + + 
count := 0 + for _, line := range strings.Split(string(fileContents), "\n") { + if strings.TrimSpace(line) == "" { + continue + } + count++ + } + + return count +} + +func TestLoggerCLF(t *testing.T) { + tmpDir := createTempDir(t, CommonFormat) + defer os.RemoveAll(tmpDir) + + logFilePath := filepath.Join(tmpDir, logFileNameSuffix) + config := &types.AccessLog{FilePath: logFilePath, Format: CommonFormat} + doLogging(t, config) + + logData, err := ioutil.ReadFile(logFilePath) + require.NoError(t, err) + + expectedLog := ` TestHost - TestUser [13/Apr/2016:07:14:19 -0700] "POST testpath HTTP/0.0" 123 12 "testReferer" "testUserAgent" 1 "testFrontend" "http://127.0.0.1/testBackend" 1ms` + assertValidLogData(t, expectedLog, logData) +} + +func TestAsyncLoggerCLF(t *testing.T) { + tmpDir := createTempDir(t, CommonFormat) + defer os.RemoveAll(tmpDir) + + logFilePath := filepath.Join(tmpDir, logFileNameSuffix) + config := &types.AccessLog{FilePath: logFilePath, Format: CommonFormat, BufferingSize: 1024} + doLogging(t, config) + + logData, err := ioutil.ReadFile(logFilePath) + require.NoError(t, err) + + expectedLog := ` TestHost - TestUser [13/Apr/2016:07:14:19 -0700] "POST testpath HTTP/0.0" 123 12 "testReferer" "testUserAgent" 1 "testFrontend" "http://127.0.0.1/testBackend" 1ms` + assertValidLogData(t, expectedLog, logData) +} + +func assertString(exp string) func(t *testing.T, actual interface{}) { + return func(t *testing.T, actual interface{}) { + t.Helper() + + assert.Equal(t, exp, actual) + } +} + +func assertNotEqual(exp string) func(t *testing.T, actual interface{}) { + return func(t *testing.T, actual interface{}) { + t.Helper() + + assert.NotEqual(t, exp, actual) + } +} + +func assertFloat64(exp float64) func(t *testing.T, actual interface{}) { + return func(t *testing.T, actual interface{}) { + t.Helper() + + assert.Equal(t, exp, actual) + } +} + +func assertFloat64NotZero() func(t *testing.T, actual interface{}) { + return func(t *testing.T, actual interface{}) { + t.Helper() + + assert.NotZero(t, actual) + } +} + +func TestLoggerJSON(t *testing.T) { + testCases := []struct { + desc string + config *types.AccessLog + expected map[string]func(t *testing.T, value interface{}) + }{ + { + desc: "default config", + config: &types.AccessLog{ + FilePath: "", + Format: JSONFormat, + }, + expected: map[string]func(t *testing.T, value interface{}){ + RequestHost: assertString(testHostname), + RequestAddr: assertString(testHostname), + RequestMethod: assertString(testMethod), + RequestPath: assertString(testPath), + RequestProtocol: assertString(testProto), + RequestPort: assertString("-"), + DownstreamStatus: assertFloat64(float64(testStatus)), + DownstreamContentSize: assertFloat64(float64(len(testContent))), + OriginContentSize: assertFloat64(float64(len(testContent))), + OriginStatus: assertFloat64(float64(testStatus)), + RequestRefererHeader: assertString(testReferer), + RequestUserAgentHeader: assertString(testUserAgent), + FrontendName: assertString(testFrontendName), + BackendURL: assertString(testBackendName), + ClientUsername: assertString(testUsername), + ClientHost: assertString(testHostname), + ClientPort: assertString(fmt.Sprintf("%d", testPort)), + ClientAddr: assertString(fmt.Sprintf("%s:%d", testHostname, testPort)), + "level": assertString("info"), + "msg": assertString(""), + "downstream_Content-Type": assertString("text/plain; charset=utf-8"), + RequestCount: assertFloat64NotZero(), + Duration: assertFloat64NotZero(), + Overhead: assertFloat64NotZero(), + RetryAttempts: 
assertFloat64(float64(testRetryAttempts)), + "time": assertNotEqual(""), + "StartLocal": assertNotEqual(""), + "StartUTC": assertNotEqual(""), + }, + }, + { + desc: "default config drop all fields", + config: &types.AccessLog{ + FilePath: "", + Format: JSONFormat, + Fields: &types.AccessLogFields{ + DefaultMode: "drop", + }, + }, + expected: map[string]func(t *testing.T, value interface{}){ + "level": assertString("info"), + "msg": assertString(""), + "time": assertNotEqual(""), + "downstream_Content-Type": assertString("text/plain; charset=utf-8"), + RequestRefererHeader: assertString(testReferer), + RequestUserAgentHeader: assertString(testUserAgent), + }, + }, + { + desc: "default config drop all fields and headers", + config: &types.AccessLog{ + FilePath: "", + Format: JSONFormat, + Fields: &types.AccessLogFields{ + DefaultMode: "drop", + Headers: &types.FieldHeaders{ + DefaultMode: "drop", + }, + }, + }, + expected: map[string]func(t *testing.T, value interface{}){ + "level": assertString("info"), + "msg": assertString(""), + "time": assertNotEqual(""), + }, + }, + { + desc: "default config drop all fields and redact headers", + config: &types.AccessLog{ + FilePath: "", + Format: JSONFormat, + Fields: &types.AccessLogFields{ + DefaultMode: "drop", + Headers: &types.FieldHeaders{ + DefaultMode: "redact", + }, + }, + }, + expected: map[string]func(t *testing.T, value interface{}){ + "level": assertString("info"), + "msg": assertString(""), + "time": assertNotEqual(""), + "downstream_Content-Type": assertString("REDACTED"), + RequestRefererHeader: assertString("REDACTED"), + RequestUserAgentHeader: assertString("REDACTED"), + }, + }, + { + desc: "default config drop all fields and headers but kept someone", + config: &types.AccessLog{ + FilePath: "", + Format: JSONFormat, + Fields: &types.AccessLogFields{ + DefaultMode: "drop", + Names: types.FieldNames{ + RequestHost: "keep", + }, + Headers: &types.FieldHeaders{ + DefaultMode: "drop", + Names: types.FieldHeaderNames{ + "Referer": "keep", + }, + }, + }, + }, + expected: map[string]func(t *testing.T, value interface{}){ + RequestHost: assertString(testHostname), + "level": assertString("info"), + "msg": assertString(""), + "time": assertNotEqual(""), + RequestRefererHeader: assertString(testReferer), + }, + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + tmpDir := createTempDir(t, JSONFormat) + defer os.RemoveAll(tmpDir) + + logFilePath := filepath.Join(tmpDir, logFileNameSuffix) + + test.config.FilePath = logFilePath + doLogging(t, test.config) + + logData, err := ioutil.ReadFile(logFilePath) + require.NoError(t, err) + + jsonData := make(map[string]interface{}) + err = json.Unmarshal(logData, &jsonData) + require.NoError(t, err) + + assert.Equal(t, len(test.expected), len(jsonData)) + + for field, assertion := range test.expected { + assertion(t, jsonData[field]) + } + }) + } +} + +func TestNewLogHandlerOutputStdout(t *testing.T) { + testCases := []struct { + desc string + config *types.AccessLog + expectedLog string + }{ + { + desc: "default config", + config: &types.AccessLog{ + FilePath: "", + Format: CommonFormat, + }, + expectedLog: `TestHost - TestUser [13/Apr/2016:07:14:19 -0700] "POST testpath HTTP/0.0" 123 12 "testReferer" "testUserAgent" 23 "testFrontend" "http://127.0.0.1/testBackend" 1ms`, + }, + { + desc: "default config with empty filters", + config: &types.AccessLog{ + FilePath: "", + Format: CommonFormat, + Filters: &types.AccessLogFilters{}, + }, + 
expectedLog: `TestHost - TestUser [13/Apr/2016:07:14:19 -0700] "POST testpath HTTP/0.0" 123 12 "testReferer" "testUserAgent" 23 "testFrontend" "http://127.0.0.1/testBackend" 1ms`, + }, + { + desc: "Status code filter not matching", + config: &types.AccessLog{ + FilePath: "", + Format: CommonFormat, + Filters: &types.AccessLogFilters{ + StatusCodes: []string{"200"}, + }, + }, + expectedLog: ``, + }, + { + desc: "Status code filter matching", + config: &types.AccessLog{ + FilePath: "", + Format: CommonFormat, + Filters: &types.AccessLogFilters{ + StatusCodes: []string{"123"}, + }, + }, + expectedLog: `TestHost - TestUser [13/Apr/2016:07:14:19 -0700] "POST testpath HTTP/0.0" 123 12 "testReferer" "testUserAgent" 23 "testFrontend" "http://127.0.0.1/testBackend" 1ms`, + }, + { + desc: "Duration filter not matching", + config: &types.AccessLog{ + FilePath: "", + Format: CommonFormat, + Filters: &types.AccessLogFilters{ + MinDuration: parse.Duration(1 * time.Hour), + }, + }, + expectedLog: ``, + }, + { + desc: "Duration filter matching", + config: &types.AccessLog{ + FilePath: "", + Format: CommonFormat, + Filters: &types.AccessLogFilters{ + MinDuration: parse.Duration(1 * time.Millisecond), + }, + }, + expectedLog: `TestHost - TestUser [13/Apr/2016:07:14:19 -0700] "POST testpath HTTP/0.0" 123 12 "testReferer" "testUserAgent" 23 "testFrontend" "http://127.0.0.1/testBackend" 1ms`, + }, + { + desc: "Retry attempts filter matching", + config: &types.AccessLog{ + FilePath: "", + Format: CommonFormat, + Filters: &types.AccessLogFilters{ + RetryAttempts: true, + }, + }, + expectedLog: `TestHost - TestUser [13/Apr/2016:07:14:19 -0700] "POST testpath HTTP/0.0" 123 12 "testReferer" "testUserAgent" 23 "testFrontend" "http://127.0.0.1/testBackend" 1ms`, + }, + { + desc: "Default mode keep", + config: &types.AccessLog{ + FilePath: "", + Format: CommonFormat, + Fields: &types.AccessLogFields{ + DefaultMode: "keep", + }, + }, + expectedLog: `TestHost - TestUser [13/Apr/2016:07:14:19 -0700] "POST testpath HTTP/0.0" 123 12 "testReferer" "testUserAgent" 23 "testFrontend" "http://127.0.0.1/testBackend" 1ms`, + }, + { + desc: "Default mode keep with override", + config: &types.AccessLog{ + FilePath: "", + Format: CommonFormat, + Fields: &types.AccessLogFields{ + DefaultMode: "keep", + Names: types.FieldNames{ + ClientHost: "drop", + }, + }, + }, + expectedLog: `- - TestUser [13/Apr/2016:07:14:19 -0700] "POST testpath HTTP/0.0" 123 12 "testReferer" "testUserAgent" 23 "testFrontend" "http://127.0.0.1/testBackend" 1ms`, + }, + { + desc: "Default mode drop", + config: &types.AccessLog{ + FilePath: "", + Format: CommonFormat, + Fields: &types.AccessLogFields{ + DefaultMode: "drop", + }, + }, + expectedLog: `- - - [-] "- - -" - - "testReferer" "testUserAgent" - - - 0ms`, + }, + { + desc: "Default mode drop with override", + config: &types.AccessLog{ + FilePath: "", + Format: CommonFormat, + Fields: &types.AccessLogFields{ + DefaultMode: "drop", + Names: types.FieldNames{ + ClientHost: "drop", + ClientUsername: "keep", + }, + }, + }, + expectedLog: `- - TestUser [-] "- - -" - - "testReferer" "testUserAgent" - - - 0ms`, + }, + { + desc: "Default mode drop with header dropped", + config: &types.AccessLog{ + FilePath: "", + Format: CommonFormat, + Fields: &types.AccessLogFields{ + DefaultMode: "drop", + Names: types.FieldNames{ + ClientHost: "drop", + ClientUsername: "keep", + }, + Headers: &types.FieldHeaders{ + DefaultMode: "drop", + }, + }, + }, + expectedLog: `- - TestUser [-] "- - -" - - "-" "-" - - - 0ms`, + }, + { + 
desc: "Default mode drop with header redacted", + config: &types.AccessLog{ + FilePath: "", + Format: CommonFormat, + Fields: &types.AccessLogFields{ + DefaultMode: "drop", + Names: types.FieldNames{ + ClientHost: "drop", + ClientUsername: "keep", + }, + Headers: &types.FieldHeaders{ + DefaultMode: "redact", + }, + }, + }, + expectedLog: `- - TestUser [-] "- - -" - - "REDACTED" "REDACTED" - - - 0ms`, + }, + { + desc: "Default mode drop with header redacted", + config: &types.AccessLog{ + FilePath: "", + Format: CommonFormat, + Fields: &types.AccessLogFields{ + DefaultMode: "drop", + Names: types.FieldNames{ + ClientHost: "drop", + ClientUsername: "keep", + }, + Headers: &types.FieldHeaders{ + DefaultMode: "keep", + Names: types.FieldHeaderNames{ + "Referer": "redact", + }, + }, + }, + }, + expectedLog: `- - TestUser [-] "- - -" - - "REDACTED" "testUserAgent" - - - 0ms`, + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + + // NOTE: It is not possible to run these cases in parallel because we capture Stdout + + file, restoreStdout := captureStdout(t) + defer restoreStdout() + + doLogging(t, test.config) + + written, err := ioutil.ReadFile(file.Name()) + require.NoError(t, err, "unable to read captured stdout from file") + assertValidLogData(t, test.expectedLog, written) + }) + } +} + +func assertValidLogData(t *testing.T, expected string, logData []byte) { + + if len(expected) == 0 { + assert.Zero(t, len(logData)) + t.Log(string(logData)) + return + } + + result, err := ParseAccessLog(string(logData)) + require.NoError(t, err) + + resultExpected, err := ParseAccessLog(expected) + require.NoError(t, err) + + formatErrMessage := fmt.Sprintf(` + Expected: %s + Actual: %s`, expected, string(logData)) + + require.Equal(t, len(resultExpected), len(result), formatErrMessage) + assert.Equal(t, resultExpected[ClientHost], result[ClientHost], formatErrMessage) + assert.Equal(t, resultExpected[ClientUsername], result[ClientUsername], formatErrMessage) + assert.Equal(t, resultExpected[RequestMethod], result[RequestMethod], formatErrMessage) + assert.Equal(t, resultExpected[RequestPath], result[RequestPath], formatErrMessage) + assert.Equal(t, resultExpected[RequestProtocol], result[RequestProtocol], formatErrMessage) + assert.Equal(t, resultExpected[OriginStatus], result[OriginStatus], formatErrMessage) + assert.Equal(t, resultExpected[OriginContentSize], result[OriginContentSize], formatErrMessage) + assert.Equal(t, resultExpected[RequestRefererHeader], result[RequestRefererHeader], formatErrMessage) + assert.Equal(t, resultExpected[RequestUserAgentHeader], result[RequestUserAgentHeader], formatErrMessage) + assert.Regexp(t, regexp.MustCompile("[0-9]*"), result[RequestCount], formatErrMessage) + assert.Equal(t, resultExpected[FrontendName], result[FrontendName], formatErrMessage) + assert.Equal(t, resultExpected[BackendURL], result[BackendURL], formatErrMessage) + assert.Regexp(t, regexp.MustCompile("[0-9]*ms"), result[Duration], formatErrMessage) +} + +func captureStdout(t *testing.T) (out *os.File, restoreStdout func()) { + file, err := ioutil.TempFile("", "testlogger") + require.NoError(t, err, "failed to create temp file") + + original := os.Stdout + os.Stdout = file + + restoreStdout = func() { + os.Stdout = original + } + + return file, restoreStdout +} + +func createTempDir(t *testing.T, prefix string) string { + tmpDir, err := ioutil.TempDir("", prefix) + require.NoError(t, err, "failed to create temp dir") + + return tmpDir +} + +func 
doLogging(t *testing.T, config *types.AccessLog) { + logger, err := NewLogHandler(config) + require.NoError(t, err) + defer logger.Close() + + if config.FilePath != "" { + _, err = os.Stat(config.FilePath) + require.NoError(t, err, fmt.Sprintf("logger should create %s", config.FilePath)) + } + + req := &http.Request{ + Header: map[string][]string{ + "User-Agent": {testUserAgent}, + "Referer": {testReferer}, + }, + Proto: testProto, + Host: testHostname, + Method: testMethod, + RemoteAddr: fmt.Sprintf("%s:%d", testHostname, testPort), + URL: &url.URL{ + Path: testPath, + }, + } + + logger.ServeHTTP(httptest.NewRecorder(), req, logWriterTestHandlerFunc) +} + +func logWriterTestHandlerFunc(rw http.ResponseWriter, r *http.Request) { + if _, err := rw.Write([]byte(testContent)); err != nil { + log.Error(err) + } + + rw.WriteHeader(testStatus) + + logDataTable := GetLogDataTable(r) + logDataTable.Core[FrontendName] = testFrontendName + logDataTable.Core[BackendURL] = testBackendName + logDataTable.Core[OriginStatus] = testStatus + logDataTable.Core[OriginContentSize] = testContentSize + logDataTable.Core[RetryAttempts] = testRetryAttempts + logDataTable.Core[StartUTC] = testStart.UTC() + logDataTable.Core[StartLocal] = testStart.Local() + logDataTable.Core[ClientUsername] = testUsername +} diff --git a/old/middlewares/accesslog/parser.go b/old/middlewares/accesslog/parser.go new file mode 100644 index 000000000..c2931d153 --- /dev/null +++ b/old/middlewares/accesslog/parser.go @@ -0,0 +1,54 @@ +package accesslog + +import ( + "bytes" + "regexp" +) + +// ParseAccessLog parse line of access log and return a map with each fields +func ParseAccessLog(data string) (map[string]string, error) { + var buffer bytes.Buffer + buffer.WriteString(`(\S+)`) // 1 - ClientHost + buffer.WriteString(`\s-\s`) // - - Spaces + buffer.WriteString(`(\S+)\s`) // 2 - ClientUsername + buffer.WriteString(`\[([^]]+)\]\s`) // 3 - StartUTC + buffer.WriteString(`"(\S*)\s?`) // 4 - RequestMethod + buffer.WriteString(`((?:[^"]*(?:\\")?)*)\s`) // 5 - RequestPath + buffer.WriteString(`([^"]*)"\s`) // 6 - RequestProtocol + buffer.WriteString(`(\S+)\s`) // 7 - OriginStatus + buffer.WriteString(`(\S+)\s`) // 8 - OriginContentSize + buffer.WriteString(`("?\S+"?)\s`) // 9 - Referrer + buffer.WriteString(`("\S+")\s`) // 10 - User-Agent + buffer.WriteString(`(\S+)\s`) // 11 - RequestCount + buffer.WriteString(`("[^"]*"|-)\s`) // 12 - FrontendName + buffer.WriteString(`("[^"]*"|-)\s`) // 13 - BackendURL + buffer.WriteString(`(\S+)`) // 14 - Duration + + regex, err := regexp.Compile(buffer.String()) + if err != nil { + return nil, err + } + + submatch := regex.FindStringSubmatch(data) + result := make(map[string]string) + + // Need to be > 13 to match CLF format + if len(submatch) > 13 { + result[ClientHost] = submatch[1] + result[ClientUsername] = submatch[2] + result[StartUTC] = submatch[3] + result[RequestMethod] = submatch[4] + result[RequestPath] = submatch[5] + result[RequestProtocol] = submatch[6] + result[OriginStatus] = submatch[7] + result[OriginContentSize] = submatch[8] + result[RequestRefererHeader] = submatch[9] + result[RequestUserAgentHeader] = submatch[10] + result[RequestCount] = submatch[11] + result[FrontendName] = submatch[12] + result[BackendURL] = submatch[13] + result[Duration] = submatch[14] + } + + return result, nil +} diff --git a/old/middlewares/accesslog/parser_test.go b/old/middlewares/accesslog/parser_test.go new file mode 100644 index 000000000..701fed4c3 --- /dev/null +++ 
b/old/middlewares/accesslog/parser_test.go @@ -0,0 +1,75 @@ +package accesslog + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestParseAccessLog(t *testing.T) { + testCases := []struct { + desc string + value string + expected map[string]string + }{ + { + desc: "full log", + value: `TestHost - TestUser [13/Apr/2016:07:14:19 -0700] "POST testpath HTTP/0.0" 123 12 "testReferer" "testUserAgent" 1 "testFrontend" "http://127.0.0.1/testBackend" 1ms`, + expected: map[string]string{ + ClientHost: "TestHost", + ClientUsername: "TestUser", + StartUTC: "13/Apr/2016:07:14:19 -0700", + RequestMethod: "POST", + RequestPath: "testpath", + RequestProtocol: "HTTP/0.0", + OriginStatus: "123", + OriginContentSize: "12", + RequestRefererHeader: `"testReferer"`, + RequestUserAgentHeader: `"testUserAgent"`, + RequestCount: "1", + FrontendName: `"testFrontend"`, + BackendURL: `"http://127.0.0.1/testBackend"`, + Duration: "1ms", + }, + }, + { + desc: "log with space", + value: `127.0.0.1 - - [09/Mar/2018:10:51:32 +0000] "GET / HTTP/1.1" 401 17 "-" "Go-http-client/1.1" 1 "testFrontend with space" - 0ms`, + expected: map[string]string{ + ClientHost: "127.0.0.1", + ClientUsername: "-", + StartUTC: "09/Mar/2018:10:51:32 +0000", + RequestMethod: "GET", + RequestPath: "/", + RequestProtocol: "HTTP/1.1", + OriginStatus: "401", + OriginContentSize: "17", + RequestRefererHeader: `"-"`, + RequestUserAgentHeader: `"Go-http-client/1.1"`, + RequestCount: "1", + FrontendName: `"testFrontend with space"`, + BackendURL: `-`, + Duration: "0ms", + }, + }, + { + desc: "bad log", + value: `bad`, + expected: map[string]string{}, + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + result, err := ParseAccessLog(test.value) + assert.NoError(t, err) + assert.Equal(t, len(test.expected), len(result)) + for key, value := range test.expected { + assert.Equal(t, value, result[key]) + } + }) + } +} diff --git a/middlewares/accesslog/save_backend.go b/old/middlewares/accesslog/save_backend.go similarity index 100% rename from middlewares/accesslog/save_backend.go rename to old/middlewares/accesslog/save_backend.go diff --git a/middlewares/accesslog/save_frontend.go b/old/middlewares/accesslog/save_frontend.go similarity index 100% rename from middlewares/accesslog/save_frontend.go rename to old/middlewares/accesslog/save_frontend.go diff --git a/old/middlewares/accesslog/save_retries.go b/old/middlewares/accesslog/save_retries.go new file mode 100644 index 000000000..56b19a14b --- /dev/null +++ b/old/middlewares/accesslog/save_retries.go @@ -0,0 +1,19 @@ +package accesslog + +import ( + "net/http" +) + +// SaveRetries is an implementation of RetryListener that stores RetryAttempts in the LogDataTable. +type SaveRetries struct{} + +// Retried implements the RetryListener interface and will be called for each retry that happens. 
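+// The first attempt is not counted as a retry, so the stored value is attempt-1 (or 0 when the request was not retried).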
+func (s *SaveRetries) Retried(req *http.Request, attempt int) { + // it is the request attempt x, but the retry attempt is x-1 + if attempt > 0 { + attempt-- + } + + table := GetLogDataTable(req) + table.Core[RetryAttempts] = attempt +} diff --git a/old/middlewares/accesslog/save_retries_test.go b/old/middlewares/accesslog/save_retries_test.go new file mode 100644 index 000000000..add4cc28f --- /dev/null +++ b/old/middlewares/accesslog/save_retries_test.go @@ -0,0 +1,48 @@ +package accesslog + +import ( + "context" + "fmt" + "net/http" + "net/http/httptest" + "testing" +) + +func TestSaveRetries(t *testing.T) { + tests := []struct { + requestAttempt int + wantRetryAttemptsInLog int + }{ + { + requestAttempt: 0, + wantRetryAttemptsInLog: 0, + }, + { + requestAttempt: 1, + wantRetryAttemptsInLog: 0, + }, + { + requestAttempt: 3, + wantRetryAttemptsInLog: 2, + }, + } + + for _, test := range tests { + test := test + + t.Run(fmt.Sprintf("%d retries", test.requestAttempt), func(t *testing.T) { + t.Parallel() + saveRetries := &SaveRetries{} + + logDataTable := &LogData{Core: make(CoreLogData)} + req := httptest.NewRequest(http.MethodGet, "/some/path", nil) + reqWithDataTable := req.WithContext(context.WithValue(req.Context(), DataTableKey, logDataTable)) + + saveRetries.Retried(reqWithDataTable, test.requestAttempt) + + if logDataTable.Core[RetryAttempts] != test.wantRetryAttemptsInLog { + t.Errorf("got %v in logDataTable, want %v", logDataTable.Core[RetryAttempts], test.wantRetryAttemptsInLog) + } + }) + } +} diff --git a/middlewares/accesslog/save_username.go b/old/middlewares/accesslog/save_username.go similarity index 100% rename from middlewares/accesslog/save_username.go rename to old/middlewares/accesslog/save_username.go diff --git a/middlewares/addPrefix.go b/old/middlewares/addPrefix.go similarity index 100% rename from middlewares/addPrefix.go rename to old/middlewares/addPrefix.go diff --git a/middlewares/addPrefix_test.go b/old/middlewares/addPrefix_test.go similarity index 95% rename from middlewares/addPrefix_test.go rename to old/middlewares/addPrefix_test.go index 53a1b84c0..b22eca72f 100644 --- a/middlewares/addPrefix_test.go +++ b/old/middlewares/addPrefix_test.go @@ -5,10 +5,12 @@ import ( "testing" "github.com/containous/traefik/testhelpers" + "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" ) func TestAddPrefix(t *testing.T) { + logrus.SetLevel(logrus.DebugLevel) tests := []struct { desc string prefix string diff --git a/middlewares/auth/authenticator.go b/old/middlewares/auth/authenticator.go similarity index 95% rename from middlewares/auth/authenticator.go rename to old/middlewares/auth/authenticator.go index 922210fc8..cdcd96266 100644 --- a/middlewares/auth/authenticator.go +++ b/old/middlewares/auth/authenticator.go @@ -7,10 +7,10 @@ import ( "strings" goauth "github.com/abbot/go-http-auth" - "github.com/containous/traefik/log" - "github.com/containous/traefik/middlewares/accesslog" - "github.com/containous/traefik/middlewares/tracing" - "github.com/containous/traefik/types" + "github.com/containous/traefik/old/log" + "github.com/containous/traefik/old/middlewares/accesslog" + "github.com/containous/traefik/old/middlewares/tracing" + "github.com/containous/traefik/old/types" "github.com/urfave/negroni" ) @@ -82,6 +82,7 @@ func createAuthForwardHandler(authConfig *types.Auth) negroni.HandlerFunc { Forward(authConfig.Forward, w, r, next) }) } + func createAuthDigestHandler(digestAuth *goauth.DigestAuth, authConfig *types.Auth) negroni.HandlerFunc { 
return negroni.HandlerFunc(func(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) { if username, _ := digestAuth.CheckAuth(r); username == "" { @@ -104,6 +105,7 @@ func createAuthDigestHandler(digestAuth *goauth.DigestAuth, authConfig *types.Au } }) } + func createAuthBasicHandler(basicAuth *goauth.BasicAuth, authConfig *types.Auth) negroni.HandlerFunc { return negroni.HandlerFunc(func(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) { if username := basicAuth.CheckAuth(r); username == "" { diff --git a/middlewares/auth/authenticator_test.go b/old/middlewares/auth/authenticator_test.go similarity index 98% rename from middlewares/auth/authenticator_test.go rename to old/middlewares/auth/authenticator_test.go index 2f2cdb0f8..2b9691a50 100644 --- a/middlewares/auth/authenticator_test.go +++ b/old/middlewares/auth/authenticator_test.go @@ -8,9 +8,9 @@ import ( "os" "testing" - "github.com/containous/traefik/middlewares/tracing" + "github.com/containous/traefik/old/middlewares/tracing" + "github.com/containous/traefik/old/types" "github.com/containous/traefik/testhelpers" - "github.com/containous/traefik/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/urfave/negroni" diff --git a/old/middlewares/auth/forward.go b/old/middlewares/auth/forward.go new file mode 100644 index 000000000..2c274d88f --- /dev/null +++ b/old/middlewares/auth/forward.go @@ -0,0 +1,157 @@ +package auth + +import ( + "io/ioutil" + "net" + "net/http" + "strings" + + "github.com/containous/traefik/old/log" + "github.com/containous/traefik/old/middlewares/tracing" + "github.com/containous/traefik/old/types" + "github.com/vulcand/oxy/forward" + "github.com/vulcand/oxy/utils" +) + +const ( + xForwardedURI = "X-Forwarded-Uri" + xForwardedMethod = "X-Forwarded-Method" +) + +// Forward the authentication to a external server +func Forward(config *types.Forward, w http.ResponseWriter, r *http.Request, next http.HandlerFunc) { + // Ensure our request client does not follow redirects + httpClient := http.Client{ + CheckRedirect: func(r *http.Request, via []*http.Request) error { + return http.ErrUseLastResponse + }, + } + + if config.TLS != nil { + tlsConfig, err := config.TLS.CreateTLSConfig() + if err != nil { + tracing.SetErrorAndDebugLog(r, "Unable to configure TLS to call %s. Cause %s", config.Address, err) + w.WriteHeader(http.StatusInternalServerError) + return + } + + httpClient.Transport = &http.Transport{ + TLSClientConfig: tlsConfig, + } + } + + forwardReq, err := http.NewRequest(http.MethodGet, config.Address, http.NoBody) + tracing.LogRequest(tracing.GetSpan(r), forwardReq) + if err != nil { + tracing.SetErrorAndDebugLog(r, "Error calling %s. Cause %s", config.Address, err) + w.WriteHeader(http.StatusInternalServerError) + return + } + + writeHeader(r, forwardReq, config.TrustForwardHeader) + + tracing.InjectRequestHeaders(forwardReq) + + forwardResponse, forwardErr := httpClient.Do(forwardReq) + if forwardErr != nil { + tracing.SetErrorAndDebugLog(r, "Error calling %s. Cause: %s", config.Address, forwardErr) + w.WriteHeader(http.StatusInternalServerError) + return + } + + body, readError := ioutil.ReadAll(forwardResponse.Body) + if readError != nil { + tracing.SetErrorAndDebugLog(r, "Error reading body %s. 
Cause: %s", config.Address, readError) + w.WriteHeader(http.StatusInternalServerError) + return + } + defer forwardResponse.Body.Close() + + // Pass the forward response's body and selected headers if it + // didn't return a response within the range of [200, 300). + if forwardResponse.StatusCode < http.StatusOK || forwardResponse.StatusCode >= http.StatusMultipleChoices { + log.Debugf("Remote error %s. StatusCode: %d", config.Address, forwardResponse.StatusCode) + + utils.CopyHeaders(w.Header(), forwardResponse.Header) + utils.RemoveHeaders(w.Header(), forward.HopHeaders...) + + // Grab the location header, if any. + redirectURL, err := forwardResponse.Location() + + if err != nil { + if err != http.ErrNoLocation { + tracing.SetErrorAndDebugLog(r, "Error reading response location header %s. Cause: %s", config.Address, err) + w.WriteHeader(http.StatusInternalServerError) + return + } + } else if redirectURL.String() != "" { + // Set the location in our response if one was sent back. + w.Header().Set("Location", redirectURL.String()) + } + + tracing.LogResponseCode(tracing.GetSpan(r), forwardResponse.StatusCode) + w.WriteHeader(forwardResponse.StatusCode) + + if _, err = w.Write(body); err != nil { + log.Error(err) + } + return + } + + for _, headerName := range config.AuthResponseHeaders { + r.Header.Set(headerName, forwardResponse.Header.Get(headerName)) + } + + r.RequestURI = r.URL.RequestURI() + next(w, r) +} + +func writeHeader(req *http.Request, forwardReq *http.Request, trustForwardHeader bool) { + utils.CopyHeaders(forwardReq.Header, req.Header) + utils.RemoveHeaders(forwardReq.Header, forward.HopHeaders...) + + if clientIP, _, err := net.SplitHostPort(req.RemoteAddr); err == nil { + if trustForwardHeader { + if prior, ok := req.Header[forward.XForwardedFor]; ok { + clientIP = strings.Join(prior, ", ") + ", " + clientIP + } + } + forwardReq.Header.Set(forward.XForwardedFor, clientIP) + } + + if xMethod := req.Header.Get(xForwardedMethod); xMethod != "" && trustForwardHeader { + forwardReq.Header.Set(xForwardedMethod, xMethod) + } else if req.Method != "" { + forwardReq.Header.Set(xForwardedMethod, req.Method) + } else { + forwardReq.Header.Del(xForwardedMethod) + } + + if xfp := req.Header.Get(forward.XForwardedProto); xfp != "" && trustForwardHeader { + forwardReq.Header.Set(forward.XForwardedProto, xfp) + } else if req.TLS != nil { + forwardReq.Header.Set(forward.XForwardedProto, "https") + } else { + forwardReq.Header.Set(forward.XForwardedProto, "http") + } + + if xfp := req.Header.Get(forward.XForwardedPort); xfp != "" && trustForwardHeader { + forwardReq.Header.Set(forward.XForwardedPort, xfp) + } + + if xfh := req.Header.Get(forward.XForwardedHost); xfh != "" && trustForwardHeader { + forwardReq.Header.Set(forward.XForwardedHost, xfh) + } else if req.Host != "" { + forwardReq.Header.Set(forward.XForwardedHost, req.Host) + } else { + forwardReq.Header.Del(forward.XForwardedHost) + } + + if xfURI := req.Header.Get(xForwardedURI); xfURI != "" && trustForwardHeader { + forwardReq.Header.Set(xForwardedURI, xfURI) + } else if req.URL.RequestURI() != "" { + forwardReq.Header.Set(xForwardedURI, req.URL.RequestURI()) + } else { + forwardReq.Header.Del(xForwardedURI) + } +} diff --git a/old/middlewares/auth/forward_test.go b/old/middlewares/auth/forward_test.go new file mode 100644 index 000000000..53b6d247b --- /dev/null +++ b/old/middlewares/auth/forward_test.go @@ -0,0 +1,392 @@ +package auth + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "testing" + + 
"github.com/containous/traefik/old/middlewares/tracing" + "github.com/containous/traefik/old/types" + "github.com/containous/traefik/testhelpers" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/urfave/negroni" + "github.com/vulcand/oxy/forward" +) + +func TestForwardAuthFail(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + http.Error(w, "Forbidden", http.StatusForbidden) + })) + defer server.Close() + + middleware, err := NewAuthenticator(&types.Auth{ + Forward: &types.Forward{ + Address: server.URL, + }, + }, &tracing.Tracing{}) + assert.NoError(t, err, "there should be no error") + + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, "traefik") + }) + n := negroni.New(middleware) + n.UseHandler(handler) + ts := httptest.NewServer(n) + defer ts.Close() + + req := testhelpers.MustNewRequest(http.MethodGet, ts.URL, nil) + res, err := http.DefaultClient.Do(req) + assert.NoError(t, err, "there should be no error") + assert.Equal(t, http.StatusForbidden, res.StatusCode, "they should be equal") + + body, err := ioutil.ReadAll(res.Body) + assert.NoError(t, err, "there should be no error") + assert.Equal(t, "Forbidden\n", string(body), "they should be equal") +} + +func TestForwardAuthSuccess(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("X-Auth-User", "user@example.com") + w.Header().Set("X-Auth-Secret", "secret") + fmt.Fprintln(w, "Success") + })) + defer server.Close() + + middleware, err := NewAuthenticator(&types.Auth{ + Forward: &types.Forward{ + Address: server.URL, + AuthResponseHeaders: []string{"X-Auth-User"}, + }, + }, &tracing.Tracing{}) + assert.NoError(t, err, "there should be no error") + + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "user@example.com", r.Header.Get("X-Auth-User")) + assert.Empty(t, r.Header.Get("X-Auth-Secret")) + fmt.Fprintln(w, "traefik") + }) + n := negroni.New(middleware) + n.UseHandler(handler) + ts := httptest.NewServer(n) + defer ts.Close() + + req := testhelpers.MustNewRequest(http.MethodGet, ts.URL, nil) + res, err := http.DefaultClient.Do(req) + assert.NoError(t, err, "there should be no error") + assert.Equal(t, http.StatusOK, res.StatusCode, "they should be equal") + + body, err := ioutil.ReadAll(res.Body) + assert.NoError(t, err, "there should be no error") + assert.Equal(t, "traefik\n", string(body), "they should be equal") +} + +func TestForwardAuthRedirect(t *testing.T) { + authTs := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + http.Redirect(w, r, "http://example.com/redirect-test", http.StatusFound) + })) + defer authTs.Close() + + authMiddleware, err := NewAuthenticator(&types.Auth{ + Forward: &types.Forward{ + Address: authTs.URL, + }, + }, &tracing.Tracing{}) + assert.NoError(t, err, "there should be no error") + + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, "traefik") + }) + n := negroni.New(authMiddleware) + n.UseHandler(handler) + ts := httptest.NewServer(n) + defer ts.Close() + + client := &http.Client{ + CheckRedirect: func(r *http.Request, via []*http.Request) error { + return http.ErrUseLastResponse + }, + } + req := testhelpers.MustNewRequest(http.MethodGet, ts.URL, nil) + res, err := client.Do(req) + assert.NoError(t, err, "there should be no error") + assert.Equal(t, 
http.StatusFound, res.StatusCode, "they should be equal") + + location, err := res.Location() + assert.NoError(t, err, "there should be no error") + assert.Equal(t, "http://example.com/redirect-test", location.String(), "they should be equal") + + body, err := ioutil.ReadAll(res.Body) + assert.NoError(t, err, "there should be no error") + assert.NotEmpty(t, string(body), "there should be something in the body") +} + +func TestForwardAuthRemoveHopByHopHeaders(t *testing.T) { + authTs := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + headers := w.Header() + for _, header := range forward.HopHeaders { + if header == forward.TransferEncoding { + headers.Add(header, "identity") + } else { + headers.Add(header, "test") + } + } + + http.Redirect(w, r, "http://example.com/redirect-test", http.StatusFound) + })) + defer authTs.Close() + + authMiddleware, err := NewAuthenticator(&types.Auth{ + Forward: &types.Forward{ + Address: authTs.URL, + }, + }, &tracing.Tracing{}) + assert.NoError(t, err, "there should be no error") + + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, "traefik") + }) + n := negroni.New(authMiddleware) + n.UseHandler(handler) + ts := httptest.NewServer(n) + defer ts.Close() + + client := &http.Client{ + CheckRedirect: func(r *http.Request, via []*http.Request) error { + return http.ErrUseLastResponse + }, + } + req := testhelpers.MustNewRequest(http.MethodGet, ts.URL, nil) + res, err := client.Do(req) + assert.NoError(t, err, "there should be no error") + assert.Equal(t, http.StatusFound, res.StatusCode, "they should be equal") + + for _, header := range forward.HopHeaders { + assert.Equal(t, "", res.Header.Get(header), "hop-by-hop header '%s' mustn't be set", header) + } + + location, err := res.Location() + assert.NoError(t, err, "there should be no error") + assert.Equal(t, "http://example.com/redirect-test", location.String(), "they should be equal") + + body, err := ioutil.ReadAll(res.Body) + assert.NoError(t, err, "there should be no error") + assert.NotEmpty(t, string(body), "there should be something in the body") +} + +func TestForwardAuthFailResponseHeaders(t *testing.T) { + authTs := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + cookie := &http.Cookie{Name: "example", Value: "testing", Path: "/"} + http.SetCookie(w, cookie) + w.Header().Add("X-Foo", "bar") + http.Error(w, "Forbidden", http.StatusForbidden) + })) + defer authTs.Close() + + authMiddleware, err := NewAuthenticator(&types.Auth{ + Forward: &types.Forward{ + Address: authTs.URL, + }, + }, &tracing.Tracing{}) + assert.NoError(t, err, "there should be no error") + + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, "traefik") + }) + n := negroni.New(authMiddleware) + n.UseHandler(handler) + ts := httptest.NewServer(n) + defer ts.Close() + + req := testhelpers.MustNewRequest(http.MethodGet, ts.URL, nil) + client := &http.Client{} + res, err := client.Do(req) + assert.NoError(t, err, "there should be no error") + assert.Equal(t, http.StatusForbidden, res.StatusCode, "they should be equal") + + require.Len(t, res.Cookies(), 1) + for _, cookie := range res.Cookies() { + assert.Equal(t, "testing", cookie.Value, "they should be equal") + } + + expectedHeaders := http.Header{ + "Content-Length": []string{"10"}, + "Content-Type": []string{"text/plain; charset=utf-8"}, + "X-Foo": []string{"bar"}, + "Set-Cookie": []string{"example=testing; Path=/"}, + 
"X-Content-Type-Options": []string{"nosniff"}, + } + + assert.Len(t, res.Header, 6) + for key, value := range expectedHeaders { + assert.Equal(t, value, res.Header[key]) + } + + body, err := ioutil.ReadAll(res.Body) + assert.NoError(t, err, "there should be no error") + assert.Equal(t, "Forbidden\n", string(body), "they should be equal") +} + +func Test_writeHeader(t *testing.T) { + testCases := []struct { + name string + headers map[string]string + trustForwardHeader bool + emptyHost bool + expectedHeaders map[string]string + checkForUnexpectedHeaders bool + }{ + { + name: "trust Forward Header", + headers: map[string]string{ + "Accept": "application/json", + "X-Forwarded-Host": "fii.bir", + }, + trustForwardHeader: true, + expectedHeaders: map[string]string{ + "Accept": "application/json", + "X-Forwarded-Host": "fii.bir", + }, + }, + { + name: "not trust Forward Header", + headers: map[string]string{ + "Accept": "application/json", + "X-Forwarded-Host": "fii.bir", + }, + trustForwardHeader: false, + expectedHeaders: map[string]string{ + "Accept": "application/json", + "X-Forwarded-Host": "foo.bar", + }, + }, + { + name: "trust Forward Header with empty Host", + headers: map[string]string{ + "Accept": "application/json", + "X-Forwarded-Host": "fii.bir", + }, + trustForwardHeader: true, + emptyHost: true, + expectedHeaders: map[string]string{ + "Accept": "application/json", + "X-Forwarded-Host": "fii.bir", + }, + }, + { + name: "not trust Forward Header with empty Host", + headers: map[string]string{ + "Accept": "application/json", + "X-Forwarded-Host": "fii.bir", + }, + trustForwardHeader: false, + emptyHost: true, + expectedHeaders: map[string]string{ + "Accept": "application/json", + "X-Forwarded-Host": "", + }, + }, + { + name: "trust Forward Header with forwarded URI", + headers: map[string]string{ + "Accept": "application/json", + "X-Forwarded-Host": "fii.bir", + "X-Forwarded-Uri": "/forward?q=1", + }, + trustForwardHeader: true, + expectedHeaders: map[string]string{ + "Accept": "application/json", + "X-Forwarded-Host": "fii.bir", + "X-Forwarded-Uri": "/forward?q=1", + }, + }, + { + name: "not trust Forward Header with forward requested URI", + headers: map[string]string{ + "Accept": "application/json", + "X-Forwarded-Host": "fii.bir", + "X-Forwarded-Uri": "/forward?q=1", + }, + trustForwardHeader: false, + expectedHeaders: map[string]string{ + "Accept": "application/json", + "X-Forwarded-Host": "foo.bar", + "X-Forwarded-Uri": "/path?q=1", + }, + }, { + name: "trust Forward Header with forwarded request Method", + headers: map[string]string{ + "X-Forwarded-Method": "OPTIONS", + }, + trustForwardHeader: true, + expectedHeaders: map[string]string{ + "X-Forwarded-Method": "OPTIONS", + }, + }, + { + name: "not trust Forward Header with forward request Method", + headers: map[string]string{ + "X-Forwarded-Method": "OPTIONS", + }, + trustForwardHeader: false, + expectedHeaders: map[string]string{ + "X-Forwarded-Method": "GET", + }, + }, + { + name: "remove hop-by-hop headers", + headers: map[string]string{ + forward.Connection: "Connection", + forward.KeepAlive: "KeepAlive", + forward.ProxyAuthenticate: "ProxyAuthenticate", + forward.ProxyAuthorization: "ProxyAuthorization", + forward.Te: "Te", + forward.Trailers: "Trailers", + forward.TransferEncoding: "TransferEncoding", + forward.Upgrade: "Upgrade", + "X-CustomHeader": "CustomHeader", + }, + trustForwardHeader: false, + expectedHeaders: map[string]string{ + "X-CustomHeader": "CustomHeader", + "X-Forwarded-Proto": "http", + 
"X-Forwarded-Host": "foo.bar", + "X-Forwarded-Uri": "/path?q=1", + "X-Forwarded-Method": "GET", + }, + checkForUnexpectedHeaders: true, + }, + } + + for _, test := range testCases { + t.Run(test.name, func(t *testing.T) { + + req := testhelpers.MustNewRequest(http.MethodGet, "http://foo.bar/path?q=1", nil) + for key, value := range test.headers { + req.Header.Set(key, value) + } + + if test.emptyHost { + req.Host = "" + } + + forwardReq := testhelpers.MustNewRequest(http.MethodGet, "http://foo.bar/path?q=1", nil) + + writeHeader(req, forwardReq, test.trustForwardHeader) + + actualHeaders := forwardReq.Header + expectedHeaders := test.expectedHeaders + for key, value := range expectedHeaders { + assert.Equal(t, value, actualHeaders.Get(key)) + actualHeaders.Del(key) + } + if test.checkForUnexpectedHeaders { + for key := range actualHeaders { + assert.Fail(t, "Unexpected header found", key) + } + } + }) + } +} diff --git a/middlewares/auth/parser.go b/old/middlewares/auth/parser.go similarity index 96% rename from middlewares/auth/parser.go rename to old/middlewares/auth/parser.go index 61492a99f..885d56fed 100644 --- a/middlewares/auth/parser.go +++ b/old/middlewares/auth/parser.go @@ -4,7 +4,7 @@ import ( "fmt" "strings" - "github.com/containous/traefik/types" + "github.com/containous/traefik/old/types" ) func parserBasicUsers(basic *types.Basic) (map[string]string, error) { diff --git a/middlewares/cbreaker.go b/old/middlewares/cbreaker.go similarity index 92% rename from middlewares/cbreaker.go rename to old/middlewares/cbreaker.go index 8afdcd6f0..a8f534960 100644 --- a/middlewares/cbreaker.go +++ b/old/middlewares/cbreaker.go @@ -3,8 +3,8 @@ package middlewares import ( "net/http" - "github.com/containous/traefik/log" - "github.com/containous/traefik/middlewares/tracing" + "github.com/containous/traefik/old/log" + "github.com/containous/traefik/old/middlewares/tracing" "github.com/vulcand/oxy/cbreaker" ) diff --git a/middlewares/compress.go b/old/middlewares/compress.go similarity index 94% rename from middlewares/compress.go rename to old/middlewares/compress.go index 865780c02..9989e889c 100644 --- a/middlewares/compress.go +++ b/old/middlewares/compress.go @@ -6,7 +6,7 @@ import ( "strings" "github.com/NYTimes/gziphandler" - "github.com/containous/traefik/log" + "github.com/containous/traefik/old/log" ) // Compress is a middleware that allows to compress the response diff --git a/middlewares/compress_test.go b/old/middlewares/compress_test.go similarity index 100% rename from middlewares/compress_test.go rename to old/middlewares/compress_test.go diff --git a/middlewares/empty_backend_handler.go b/old/middlewares/empty_backend_handler.go similarity index 100% rename from middlewares/empty_backend_handler.go rename to old/middlewares/empty_backend_handler.go diff --git a/middlewares/empty_backend_handler_test.go b/old/middlewares/empty_backend_handler_test.go similarity index 100% rename from middlewares/empty_backend_handler_test.go rename to old/middlewares/empty_backend_handler_test.go diff --git a/middlewares/errorpages/error_pages.go b/old/middlewares/errorpages/error_pages.go similarity index 97% rename from middlewares/errorpages/error_pages.go rename to old/middlewares/errorpages/error_pages.go index 3c62eb9e8..a5dcd584b 100644 --- a/middlewares/errorpages/error_pages.go +++ b/old/middlewares/errorpages/error_pages.go @@ -11,9 +11,9 @@ import ( "strconv" "strings" - "github.com/containous/traefik/log" - "github.com/containous/traefik/middlewares" - 
"github.com/containous/traefik/types" + "github.com/containous/traefik/old/log" + "github.com/containous/traefik/old/middlewares" + "github.com/containous/traefik/old/types" "github.com/vulcand/oxy/forward" "github.com/vulcand/oxy/utils" ) diff --git a/middlewares/errorpages/error_pages_test.go b/old/middlewares/errorpages/error_pages_test.go similarity index 99% rename from middlewares/errorpages/error_pages_test.go rename to old/middlewares/errorpages/error_pages_test.go index db11c7076..aace13667 100644 --- a/middlewares/errorpages/error_pages_test.go +++ b/old/middlewares/errorpages/error_pages_test.go @@ -7,8 +7,8 @@ import ( "strconv" "testing" + "github.com/containous/traefik/old/types" "github.com/containous/traefik/testhelpers" - "github.com/containous/traefik/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/urfave/negroni" diff --git a/middlewares/forwardedheaders/forwarded_header.go b/old/middlewares/forwardedheaders/forwarded_header.go similarity index 100% rename from middlewares/forwardedheaders/forwarded_header.go rename to old/middlewares/forwardedheaders/forwarded_header.go diff --git a/middlewares/forwardedheaders/forwarded_header_test.go b/old/middlewares/forwardedheaders/forwarded_header_test.go similarity index 100% rename from middlewares/forwardedheaders/forwarded_header_test.go rename to old/middlewares/forwardedheaders/forwarded_header_test.go diff --git a/middlewares/handlerSwitcher.go b/old/middlewares/handlerSwitcher.go similarity index 100% rename from middlewares/handlerSwitcher.go rename to old/middlewares/handlerSwitcher.go diff --git a/middlewares/headers.go b/old/middlewares/headers.go similarity index 97% rename from middlewares/headers.go rename to old/middlewares/headers.go index 741ba167a..dee13fc5c 100644 --- a/middlewares/headers.go +++ b/old/middlewares/headers.go @@ -5,7 +5,7 @@ package middlewares import ( "net/http" - "github.com/containous/traefik/types" + "github.com/containous/traefik/old/types" ) // HeaderOptions is a struct for specifying configuration options for the headers middleware. 
diff --git a/middlewares/headers_test.go b/old/middlewares/headers_test.go similarity index 100% rename from middlewares/headers_test.go rename to old/middlewares/headers_test.go diff --git a/middlewares/ip_whitelister.go b/old/middlewares/ip_whitelister.go similarity index 94% rename from middlewares/ip_whitelister.go rename to old/middlewares/ip_whitelister.go index a25921014..31355e462 100644 --- a/middlewares/ip_whitelister.go +++ b/old/middlewares/ip_whitelister.go @@ -5,8 +5,8 @@ import ( "net/http" "github.com/containous/traefik/ip" - "github.com/containous/traefik/log" - "github.com/containous/traefik/middlewares/tracing" + "github.com/containous/traefik/old/log" + "github.com/containous/traefik/old/middlewares/tracing" "github.com/pkg/errors" "github.com/urfave/negroni" ) diff --git a/middlewares/ip_whitelister_test.go b/old/middlewares/ip_whitelister_test.go similarity index 100% rename from middlewares/ip_whitelister_test.go rename to old/middlewares/ip_whitelister_test.go diff --git a/middlewares/pipelining/pipelining.go b/old/middlewares/pipelining/pipelining.go similarity index 100% rename from middlewares/pipelining/pipelining.go rename to old/middlewares/pipelining/pipelining.go diff --git a/middlewares/pipelining/pipelining_test.go b/old/middlewares/pipelining/pipelining_test.go similarity index 100% rename from middlewares/pipelining/pipelining_test.go rename to old/middlewares/pipelining/pipelining_test.go diff --git a/middlewares/recover.go b/old/middlewares/recover.go similarity index 97% rename from middlewares/recover.go rename to old/middlewares/recover.go index 88a98629d..916f77ec5 100644 --- a/middlewares/recover.go +++ b/old/middlewares/recover.go @@ -4,7 +4,7 @@ import ( "net/http" "runtime" - "github.com/containous/traefik/log" + "github.com/containous/traefik/old/log" "github.com/urfave/negroni" ) diff --git a/middlewares/recover_test.go b/old/middlewares/recover_test.go similarity index 100% rename from middlewares/recover_test.go rename to old/middlewares/recover_test.go diff --git a/old/middlewares/redirect/redirect.go b/old/middlewares/redirect/redirect.go new file mode 100644 index 000000000..58ade5064 --- /dev/null +++ b/old/middlewares/redirect/redirect.go @@ -0,0 +1,163 @@ +package redirect + +import ( + "bytes" + "fmt" + "io" + "net/http" + "net/url" + "regexp" + "strings" + "text/template" + + "github.com/containous/traefik/old/configuration" + "github.com/containous/traefik/old/middlewares" + "github.com/urfave/negroni" + "github.com/vulcand/oxy/utils" +) + +const ( + defaultRedirectRegex = `^(?:https?:\/\/)?([\w\._-]+)(?::\d+)?(.*)$` +) + +// NewEntryPointHandler create a new redirection handler base on entry point +func NewEntryPointHandler(dstEntryPoint *configuration.EntryPoint, permanent bool) (negroni.Handler, error) { + exp := regexp.MustCompile(`(:\d+)`) + match := exp.FindStringSubmatch(dstEntryPoint.Address) + if len(match) == 0 { + return nil, fmt.Errorf("bad Address format %q", dstEntryPoint.Address) + } + + protocol := "http" + if dstEntryPoint.TLS != nil { + protocol = "https" + } + + replacement := protocol + "://${1}" + match[0] + "${2}" + + return NewRegexHandler(defaultRedirectRegex, replacement, permanent) +} + +// NewRegexHandler create a new redirection handler base on regex +func NewRegexHandler(exp string, replacement string, permanent bool) (negroni.Handler, error) { + re, err := regexp.Compile(exp) + if err != nil { + return nil, err + } + + return &handler{ + regexp: re, + replacement: replacement, + permanent: permanent, + 
errHandler: utils.DefaultHandler, + }, nil +} + +type handler struct { + regexp *regexp.Regexp + replacement string + permanent bool + errHandler utils.ErrorHandler +} + +func (h *handler) ServeHTTP(rw http.ResponseWriter, req *http.Request, next http.HandlerFunc) { + oldURL := rawURL(req) + + // only continue if the Regexp param matches the URL + if !h.regexp.MatchString(oldURL) { + next.ServeHTTP(rw, req) + return + } + + // apply a rewrite regexp to the URL + newURL := h.regexp.ReplaceAllString(oldURL, h.replacement) + + // replace any variables that may be in there + rewrittenURL := &bytes.Buffer{} + if err := applyString(newURL, rewrittenURL, req); err != nil { + h.errHandler.ServeHTTP(rw, req, err) + return + } + + // parse the rewritten URL and replace request URL with it + parsedURL, err := url.Parse(rewrittenURL.String()) + if err != nil { + h.errHandler.ServeHTTP(rw, req, err) + return + } + + if stripPrefix, stripPrefixOk := req.Context().Value(middlewares.StripPrefixKey).(string); stripPrefixOk { + if len(stripPrefix) > 0 { + parsedURL.Path = stripPrefix + } + } + + if addPrefix, addPrefixOk := req.Context().Value(middlewares.AddPrefixKey).(string); addPrefixOk { + if len(addPrefix) > 0 { + parsedURL.Path = strings.Replace(parsedURL.Path, addPrefix, "", 1) + } + } + + if replacePath, replacePathOk := req.Context().Value(middlewares.ReplacePathKey).(string); replacePathOk { + if len(replacePath) > 0 { + parsedURL.Path = replacePath + } + } + + if newURL != oldURL { + handler := &moveHandler{location: parsedURL, permanent: h.permanent} + handler.ServeHTTP(rw, req) + return + } + + req.URL = parsedURL + + // make sure the request URI corresponds the rewritten URL + req.RequestURI = req.URL.RequestURI() + next.ServeHTTP(rw, req) +} + +type moveHandler struct { + location *url.URL + permanent bool +} + +func (m *moveHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + rw.Header().Set("Location", m.location.String()) + status := http.StatusFound + if m.permanent { + status = http.StatusMovedPermanently + } + rw.WriteHeader(status) + rw.Write([]byte(http.StatusText(status))) +} + +func rawURL(request *http.Request) string { + scheme := "http" + if request.TLS != nil || isXForwardedHTTPS(request) { + scheme = "https" + } + + return strings.Join([]string{scheme, "://", request.Host, request.RequestURI}, "") +} + +func isXForwardedHTTPS(request *http.Request) bool { + xForwardedProto := request.Header.Get("X-Forwarded-Proto") + + return len(xForwardedProto) > 0 && xForwardedProto == "https" +} + +func applyString(in string, out io.Writer, request *http.Request) error { + t, err := template.New("t").Parse(in) + if err != nil { + return err + } + + data := struct { + Request *http.Request + }{ + Request: request, + } + + return t.Execute(out, data) +} diff --git a/old/middlewares/redirect/redirect_test.go b/old/middlewares/redirect/redirect_test.go new file mode 100644 index 000000000..522d76821 --- /dev/null +++ b/old/middlewares/redirect/redirect_test.go @@ -0,0 +1,182 @@ +package redirect + +import ( + "net/http" + "net/http/httptest" + "testing" + + "github.com/containous/traefik/old/configuration" + "github.com/containous/traefik/testhelpers" + "github.com/containous/traefik/tls" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNewEntryPointHandler(t *testing.T) { + testCases := []struct { + desc string + entryPoint *configuration.EntryPoint + permanent bool + url string + expectedURL string + expectedStatus int + errorExpected 
bool + }{ + { + desc: "HTTP to HTTPS", + entryPoint: &configuration.EntryPoint{Address: ":443", TLS: &tls.TLS{}}, + url: "http://foo:80", + expectedURL: "https://foo:443", + expectedStatus: http.StatusFound, + }, + { + desc: "HTTPS to HTTP", + entryPoint: &configuration.EntryPoint{Address: ":80"}, + url: "https://foo:443", + expectedURL: "http://foo:80", + expectedStatus: http.StatusFound, + }, + { + desc: "HTTP to HTTP", + entryPoint: &configuration.EntryPoint{Address: ":88"}, + url: "http://foo:80", + expectedURL: "http://foo:88", + expectedStatus: http.StatusFound, + }, + { + desc: "HTTP to HTTPS permanent", + entryPoint: &configuration.EntryPoint{Address: ":443", TLS: &tls.TLS{}}, + permanent: true, + url: "http://foo:80", + expectedURL: "https://foo:443", + expectedStatus: http.StatusMovedPermanently, + }, + { + desc: "HTTPS to HTTP permanent", + entryPoint: &configuration.EntryPoint{Address: ":80"}, + permanent: true, + url: "https://foo:443", + expectedURL: "http://foo:80", + expectedStatus: http.StatusMovedPermanently, + }, + { + desc: "invalid address", + entryPoint: &configuration.EntryPoint{Address: ":foo", TLS: &tls.TLS{}}, + url: "http://foo:80", + errorExpected: true, + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + handler, err := NewEntryPointHandler(test.entryPoint, test.permanent) + + if test.errorExpected { + require.Error(t, err) + } else { + require.NoError(t, err) + + recorder := httptest.NewRecorder() + r := testhelpers.MustNewRequest(http.MethodGet, test.url, nil) + handler.ServeHTTP(recorder, r, nil) + + location, err := recorder.Result().Location() + require.NoError(t, err) + + assert.Equal(t, test.expectedURL, location.String()) + assert.Equal(t, test.expectedStatus, recorder.Code) + } + }) + } +} + +func TestNewRegexHandler(t *testing.T) { + testCases := []struct { + desc string + regex string + replacement string + permanent bool + url string + expectedURL string + expectedStatus int + errorExpected bool + }{ + { + desc: "simple redirection", + regex: `^(?:http?:\/\/)(foo)(\.com)(:\d+)(.*)$`, + replacement: "https://${1}bar$2:443$4", + url: "http://foo.com:80", + expectedURL: "https://foobar.com:443", + expectedStatus: http.StatusFound, + }, + { + desc: "use request header", + regex: `^(?:http?:\/\/)(foo)(\.com)(:\d+)(.*)$`, + replacement: `https://${1}{{ .Request.Header.Get "X-Foo" }}$2:443$4`, + url: "http://foo.com:80", + expectedURL: "https://foobar.com:443", + expectedStatus: http.StatusFound, + }, + { + desc: "URL doesn't match regex", + regex: `^(?:http?:\/\/)(foo)(\.com)(:\d+)(.*)$`, + replacement: "https://${1}bar$2:443$4", + url: "http://bar.com:80", + expectedStatus: http.StatusOK, + }, + { + desc: "invalid rewritten URL", + regex: `^(.*)$`, + replacement: "http://192.168.0.%31/", + url: "http://foo.com:80", + expectedStatus: http.StatusBadGateway, + }, + { + desc: "invalid regex", + regex: `^(.*`, + replacement: "$1", + url: "http://foo.com:80", + errorExpected: true, + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + handler, err := NewRegexHandler(test.regex, test.replacement, test.permanent) + + if test.errorExpected { + require.Nil(t, handler) + require.Error(t, err) + } else { + require.NotNil(t, handler) + require.NoError(t, err) + + recorder := httptest.NewRecorder() + r := testhelpers.MustNewRequest(http.MethodGet, test.url, nil) + r.Header.Set("X-Foo", "bar") + next := func(rw http.ResponseWriter, req 
*http.Request) {} + handler.ServeHTTP(recorder, r, next) + + if test.expectedStatus == http.StatusMovedPermanently || test.expectedStatus == http.StatusFound { + assert.Equal(t, test.expectedStatus, recorder.Code) + + location, err := recorder.Result().Location() + require.NoError(t, err) + + assert.Equal(t, test.expectedURL, location.String()) + } else { + assert.Equal(t, test.expectedStatus, recorder.Code) + + location, err := recorder.Result().Location() + require.Errorf(t, err, "Location %v", location) + } + } + }) + } +} diff --git a/middlewares/replace_path.go b/old/middlewares/replace_path.go similarity index 100% rename from middlewares/replace_path.go rename to old/middlewares/replace_path.go diff --git a/middlewares/replace_path_regex.go b/old/middlewares/replace_path_regex.go similarity index 96% rename from middlewares/replace_path_regex.go rename to old/middlewares/replace_path_regex.go index d753e86c0..ce2e96f93 100644 --- a/middlewares/replace_path_regex.go +++ b/old/middlewares/replace_path_regex.go @@ -6,7 +6,7 @@ import ( "regexp" "strings" - "github.com/containous/traefik/log" + "github.com/containous/traefik/old/log" ) // ReplacePathRegex is a middleware used to replace the path of a URL request with a regular expression diff --git a/middlewares/replace_path_regex_test.go b/old/middlewares/replace_path_regex_test.go similarity index 100% rename from middlewares/replace_path_regex_test.go rename to old/middlewares/replace_path_regex_test.go diff --git a/middlewares/replace_path_test.go b/old/middlewares/replace_path_test.go similarity index 100% rename from middlewares/replace_path_test.go rename to old/middlewares/replace_path_test.go diff --git a/middlewares/request_host.go b/old/middlewares/request_host.go similarity index 86% rename from middlewares/request_host.go rename to old/middlewares/request_host.go index 263b026ae..f92d5636c 100644 --- a/middlewares/request_host.go +++ b/old/middlewares/request_host.go @@ -6,7 +6,8 @@ import ( "net/http" "strings" - "github.com/containous/traefik/types" + "github.com/containous/traefik/old/log" + "github.com/containous/traefik/old/types" ) var requestHostKey struct{} @@ -38,5 +39,7 @@ func GetCanonizedHost(ctx context.Context) string { if val, ok := ctx.Value(requestHostKey).(string); ok { return val } + + log.Warn("RequestHost is missing in the middleware chain") return "" } diff --git a/middlewares/request_host_test.go b/old/middlewares/request_host_test.go similarity index 100% rename from middlewares/request_host_test.go rename to old/middlewares/request_host_test.go diff --git a/middlewares/retry.go b/old/middlewares/retry.go similarity index 99% rename from middlewares/retry.go rename to old/middlewares/retry.go index 79a05d900..b5d3f29ab 100644 --- a/middlewares/retry.go +++ b/old/middlewares/retry.go @@ -8,7 +8,7 @@ import ( "net/http" "net/http/httptrace" - "github.com/containous/traefik/log" + "github.com/containous/traefik/old/log" ) // Compile time validation that the response writer implements http interfaces correctly. 
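The redirect middleware added above (old/middlewares/redirect) matches the full request URL against a regular expression, runs the replacement through text/template (so request data such as headers can be injected), and answers with a 301 or 302 whenever the rewritten URL differs from the original. As a usage illustration only, the sketch below wires NewRegexHandler into a negroni chain to push plain-HTTP traffic to an HTTPS entry point; the regex and replacement mirror defaultRedirectRegex and the replacement string built by NewEntryPointHandler, while the listening address and the dummy backend are assumptions made for the example.

package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/containous/traefik/old/middlewares/redirect"
	"github.com/urfave/negroni"
)

func main() {
	// Same shape as defaultRedirectRegex: capture the host and the path, then
	// rebuild the URL on the HTTPS entry point (port 443 here).
	handler, err := redirect.NewRegexHandler(
		`^(?:https?:\/\/)?([\w\._-]+)(?::\d+)?(.*)$`,
		"https://${1}:443${2}",
		true, // permanent: reply with 301 instead of 302
	)
	if err != nil {
		log.Fatal(err)
	}

	n := negroni.New(handler)
	n.UseHandler(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
		// The redirect handler only falls through to this backend when no
		// redirect is issued (regex miss or unchanged URL).
		fmt.Fprintln(rw, "reached the backend")
	}))

	log.Fatal(http.ListenAndServe(":80", n))
}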
diff --git a/middlewares/retry_test.go b/old/middlewares/retry_test.go similarity index 100% rename from middlewares/retry_test.go rename to old/middlewares/retry_test.go diff --git a/middlewares/routes.go b/old/middlewares/routes.go similarity index 100% rename from middlewares/routes.go rename to old/middlewares/routes.go diff --git a/middlewares/secure.go b/old/middlewares/secure.go similarity index 96% rename from middlewares/secure.go rename to old/middlewares/secure.go index aedd228ab..2ee4858d9 100644 --- a/middlewares/secure.go +++ b/old/middlewares/secure.go @@ -1,7 +1,7 @@ package middlewares import ( - "github.com/containous/traefik/types" + "github.com/containous/traefik/old/types" "github.com/unrolled/secure" ) diff --git a/old/middlewares/stateful.go b/old/middlewares/stateful.go new file mode 100644 index 000000000..4762d97a1 --- /dev/null +++ b/old/middlewares/stateful.go @@ -0,0 +1,12 @@ +package middlewares + +import "net/http" + +// Stateful interface groups all http interfaces that must be +// implemented by a stateful middleware (ie: recorders) +type Stateful interface { + http.ResponseWriter + http.Hijacker + http.Flusher + http.CloseNotifier +} diff --git a/middlewares/stats.go b/old/middlewares/stats.go similarity index 100% rename from middlewares/stats.go rename to old/middlewares/stats.go diff --git a/middlewares/stripPrefix.go b/old/middlewares/stripPrefix.go similarity index 100% rename from middlewares/stripPrefix.go rename to old/middlewares/stripPrefix.go diff --git a/middlewares/stripPrefixRegex.go b/old/middlewares/stripPrefixRegex.go similarity index 97% rename from middlewares/stripPrefixRegex.go rename to old/middlewares/stripPrefixRegex.go index c249f0fb2..9c3d19ff1 100644 --- a/middlewares/stripPrefixRegex.go +++ b/old/middlewares/stripPrefixRegex.go @@ -5,7 +5,7 @@ import ( "net/http" "github.com/containous/mux" - "github.com/containous/traefik/log" + "github.com/containous/traefik/old/log" ) // StripPrefixRegex is a middleware used to strip prefix from an URL request diff --git a/middlewares/stripPrefixRegex_test.go b/old/middlewares/stripPrefixRegex_test.go similarity index 100% rename from middlewares/stripPrefixRegex_test.go rename to old/middlewares/stripPrefixRegex_test.go diff --git a/middlewares/stripPrefix_test.go b/old/middlewares/stripPrefix_test.go similarity index 100% rename from middlewares/stripPrefix_test.go rename to old/middlewares/stripPrefix_test.go diff --git a/middlewares/tlsClientHeaders.go b/old/middlewares/tlsClientHeaders.go similarity index 98% rename from middlewares/tlsClientHeaders.go rename to old/middlewares/tlsClientHeaders.go index ac4a5cb41..d04bd36ae 100644 --- a/middlewares/tlsClientHeaders.go +++ b/old/middlewares/tlsClientHeaders.go @@ -9,8 +9,8 @@ import ( "net/url" "strings" - "github.com/containous/traefik/log" - "github.com/containous/traefik/types" + "github.com/containous/traefik/old/log" + "github.com/containous/traefik/old/types" ) const xForwardedTLSClientCert = "X-Forwarded-Tls-Client-Cert" diff --git a/middlewares/tlsClientHeaders_test.go b/old/middlewares/tlsClientHeaders_test.go similarity index 99% rename from middlewares/tlsClientHeaders_test.go rename to old/middlewares/tlsClientHeaders_test.go index 583da2bb6..d429f12ee 100644 --- a/middlewares/tlsClientHeaders_test.go +++ b/old/middlewares/tlsClientHeaders_test.go @@ -12,8 +12,8 @@ import ( "strings" "testing" + "github.com/containous/traefik/old/types" "github.com/containous/traefik/testhelpers" - "github.com/containous/traefik/types" 
"github.com/stretchr/testify/require" ) diff --git a/middlewares/tracing/carrier.go b/old/middlewares/tracing/carrier.go similarity index 100% rename from middlewares/tracing/carrier.go rename to old/middlewares/tracing/carrier.go diff --git a/middlewares/tracing/datadog/datadog.go b/old/middlewares/tracing/datadog/datadog.go similarity index 96% rename from middlewares/tracing/datadog/datadog.go rename to old/middlewares/tracing/datadog/datadog.go index 217c3aab6..49d7ed710 100644 --- a/middlewares/tracing/datadog/datadog.go +++ b/old/middlewares/tracing/datadog/datadog.go @@ -4,7 +4,7 @@ import ( "io" "strings" - "github.com/containous/traefik/log" + "github.com/containous/traefik/old/log" "github.com/opentracing/opentracing-go" ddtracer "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/opentracer" datadog "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" diff --git a/old/middlewares/tracing/entrypoint.go b/old/middlewares/tracing/entrypoint.go new file mode 100644 index 000000000..40d090a3a --- /dev/null +++ b/old/middlewares/tracing/entrypoint.go @@ -0,0 +1,57 @@ +package tracing + +import ( + "fmt" + "net/http" + + "github.com/containous/traefik/old/log" + "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go/ext" + "github.com/urfave/negroni" +) + +type entryPointMiddleware struct { + entryPoint string + *Tracing +} + +// NewEntryPoint creates a new middleware that the incoming request +func (t *Tracing) NewEntryPoint(name string) negroni.Handler { + log.Debug("Added entrypoint tracing middleware") + return &entryPointMiddleware{Tracing: t, entryPoint: name} +} + +func (e *entryPointMiddleware) ServeHTTP(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) { + opNameFunc := generateEntryPointSpanName + + ctx, _ := e.Extract(opentracing.HTTPHeaders, HTTPHeadersCarrier(r.Header)) + span := e.StartSpan(opNameFunc(r, e.entryPoint, e.SpanNameLimit), ext.RPCServerOption(ctx)) + ext.Component.Set(span, e.ServiceName) + LogRequest(span, r) + ext.SpanKindRPCServer.Set(span) + + r = r.WithContext(opentracing.ContextWithSpan(r.Context(), span)) + + recorder := newStatusCodeRecoder(w, 200) + next(recorder, r) + + LogResponseCode(span, recorder.Status()) + span.Finish() +} + +// generateEntryPointSpanName will return a Span name of an appropriate lenth based on the 'spanLimit' argument. If needed, it will be truncated, but will not be less than 24 characters. 
+func generateEntryPointSpanName(r *http.Request, entryPoint string, spanLimit int) string { + name := fmt.Sprintf("Entrypoint %s %s", entryPoint, r.Host) + + if spanLimit > 0 && len(name) > spanLimit { + if spanLimit < EntryPointMaxLengthNumber { + log.Warnf("SpanNameLimit is set to be less than required static number of characters, defaulting to %d + 3", EntryPointMaxLengthNumber) + spanLimit = EntryPointMaxLengthNumber + 3 + } + hash := computeHash(name) + limit := (spanLimit - EntryPointMaxLengthNumber) / 2 + name = fmt.Sprintf("Entrypoint %s %s %s", truncateString(entryPoint, limit), truncateString(r.Host, limit), hash) + } + + return name +} diff --git a/old/middlewares/tracing/entrypoint_test.go b/old/middlewares/tracing/entrypoint_test.go new file mode 100644 index 000000000..865bcfc09 --- /dev/null +++ b/old/middlewares/tracing/entrypoint_test.go @@ -0,0 +1,70 @@ +package tracing + +import ( + "net/http" + "net/http/httptest" + "testing" + + "github.com/opentracing/opentracing-go/ext" + "github.com/stretchr/testify/assert" +) + +func TestEntryPointMiddlewareServeHTTP(t *testing.T) { + expectedTags := map[string]interface{}{ + "span.kind": ext.SpanKindRPCServerEnum, + "http.method": "GET", + "component": "", + "http.url": "http://www.test.com", + "http.host": "www.test.com", + } + + testCases := []struct { + desc string + entryPoint string + tracing *Tracing + expectedTags map[string]interface{} + expectedName string + }{ + { + desc: "no truncation test", + entryPoint: "test", + tracing: &Tracing{ + SpanNameLimit: 0, + tracer: &MockTracer{Span: &MockSpan{Tags: make(map[string]interface{})}}, + }, + expectedTags: expectedTags, + expectedName: "Entrypoint test www.test.com", + }, { + desc: "basic test", + entryPoint: "test", + tracing: &Tracing{ + SpanNameLimit: 25, + tracer: &MockTracer{Span: &MockSpan{Tags: make(map[string]interface{})}}, + }, + expectedTags: expectedTags, + expectedName: "Entrypoint te... ww... 
39b97e58", + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + e := &entryPointMiddleware{ + entryPoint: test.entryPoint, + Tracing: test.tracing, + } + + next := func(http.ResponseWriter, *http.Request) { + span := test.tracing.tracer.(*MockTracer).Span + + actual := span.Tags + assert.Equal(t, test.expectedTags, actual) + assert.Equal(t, test.expectedName, span.OpName) + } + + e.ServeHTTP(httptest.NewRecorder(), httptest.NewRequest(http.MethodGet, "http://www.test.com", nil), next) + }) + } +} diff --git a/old/middlewares/tracing/forwarder.go b/old/middlewares/tracing/forwarder.go new file mode 100644 index 000000000..d2ff48583 --- /dev/null +++ b/old/middlewares/tracing/forwarder.go @@ -0,0 +1,63 @@ +package tracing + +import ( + "fmt" + "net/http" + + "github.com/containous/traefik/old/log" + "github.com/opentracing/opentracing-go/ext" + "github.com/urfave/negroni" +) + +type forwarderMiddleware struct { + frontend string + backend string + opName string + *Tracing +} + +// NewForwarderMiddleware creates a new forwarder middleware that traces the outgoing request +func (t *Tracing) NewForwarderMiddleware(frontend, backend string) negroni.Handler { + log.Debugf("Added outgoing tracing middleware %s", frontend) + return &forwarderMiddleware{ + Tracing: t, + frontend: frontend, + backend: backend, + opName: generateForwardSpanName(frontend, backend, t.SpanNameLimit), + } +} + +func (f *forwarderMiddleware) ServeHTTP(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) { + span, r, finish := StartSpan(r, f.opName, true) + defer finish() + span.SetTag("frontend.name", f.frontend) + span.SetTag("backend.name", f.backend) + ext.HTTPMethod.Set(span, r.Method) + ext.HTTPUrl.Set(span, fmt.Sprintf("%s%s", r.URL.String(), r.RequestURI)) + span.SetTag("http.host", r.Host) + + InjectRequestHeaders(r) + + recorder := newStatusCodeRecoder(w, 200) + + next(recorder, r) + + LogResponseCode(span, recorder.Status()) +} + +// generateForwardSpanName will return a Span name of an appropriate lenth based on the 'spanLimit' argument. 
If needed, it will be truncated, but will not be less than 21 characters +func generateForwardSpanName(frontend, backend string, spanLimit int) string { + name := fmt.Sprintf("forward %s/%s", frontend, backend) + + if spanLimit > 0 && len(name) > spanLimit { + if spanLimit < ForwardMaxLengthNumber { + log.Warnf("SpanNameLimit is set to be less than required static number of characters, defaulting to %d + 3", ForwardMaxLengthNumber) + spanLimit = ForwardMaxLengthNumber + 3 + } + hash := computeHash(name) + limit := (spanLimit - ForwardMaxLengthNumber) / 2 + name = fmt.Sprintf("forward %s/%s/%s", truncateString(frontend, limit), truncateString(backend, limit), hash) + } + + return name +} diff --git a/old/middlewares/tracing/forwarder_test.go b/old/middlewares/tracing/forwarder_test.go new file mode 100644 index 000000000..00c90c293 --- /dev/null +++ b/old/middlewares/tracing/forwarder_test.go @@ -0,0 +1,93 @@ +package tracing + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestTracingNewForwarderMiddleware(t *testing.T) { + testCases := []struct { + desc string + tracer *Tracing + frontend string + backend string + expected *forwarderMiddleware + }{ + { + desc: "Simple Forward Tracer without truncation and hashing", + tracer: &Tracing{ + SpanNameLimit: 101, + }, + frontend: "some-service.domain.tld", + backend: "some-service.domain.tld", + expected: &forwarderMiddleware{ + Tracing: &Tracing{ + SpanNameLimit: 101, + }, + frontend: "some-service.domain.tld", + backend: "some-service.domain.tld", + opName: "forward some-service.domain.tld/some-service.domain.tld", + }, + }, { + desc: "Simple Forward Tracer with truncation and hashing", + tracer: &Tracing{ + SpanNameLimit: 101, + }, + frontend: "some-service-100.slug.namespace.environment.domain.tld", + backend: "some-service-100.slug.namespace.environment.domain.tld", + expected: &forwarderMiddleware{ + Tracing: &Tracing{ + SpanNameLimit: 101, + }, + frontend: "some-service-100.slug.namespace.environment.domain.tld", + backend: "some-service-100.slug.namespace.environment.domain.tld", + opName: "forward some-service-100.slug.namespace.enviro.../some-service-100.slug.namespace.enviro.../bc4a0d48", + }, + }, + { + desc: "Exactly 101 chars", + tracer: &Tracing{ + SpanNameLimit: 101, + }, + frontend: "some-service1.namespace.environment.domain.tld", + backend: "some-service1.namespace.environment.domain.tld", + expected: &forwarderMiddleware{ + Tracing: &Tracing{ + SpanNameLimit: 101, + }, + frontend: "some-service1.namespace.environment.domain.tld", + backend: "some-service1.namespace.environment.domain.tld", + opName: "forward some-service1.namespace.environment.domain.tld/some-service1.namespace.environment.domain.tld", + }, + }, + { + desc: "More than 101 chars", + tracer: &Tracing{ + SpanNameLimit: 101, + }, + frontend: "some-service1.frontend.namespace.environment.domain.tld", + backend: "some-service1.backend.namespace.environment.domain.tld", + expected: &forwarderMiddleware{ + Tracing: &Tracing{ + SpanNameLimit: 101, + }, + frontend: "some-service1.frontend.namespace.environment.domain.tld", + backend: "some-service1.backend.namespace.environment.domain.tld", + opName: "forward some-service1.frontend.namespace.envir.../some-service1.backend.namespace.enviro.../fa49dd23", + }, + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + actual := test.tracer.NewForwarderMiddleware(test.frontend, test.backend) + + assert.Equal(t, test.expected, actual) + 
assert.True(t, len(test.expected.opName) <= test.tracer.SpanNameLimit) + }) + } +} diff --git a/middlewares/tracing/jaeger/jaeger.go b/old/middlewares/tracing/jaeger/jaeger.go similarity index 98% rename from middlewares/tracing/jaeger/jaeger.go rename to old/middlewares/tracing/jaeger/jaeger.go index 1792703eb..77ee85117 100644 --- a/middlewares/tracing/jaeger/jaeger.go +++ b/old/middlewares/tracing/jaeger/jaeger.go @@ -4,7 +4,7 @@ import ( "fmt" "io" - "github.com/containous/traefik/log" + "github.com/containous/traefik/old/log" "github.com/opentracing/opentracing-go" jaegercfg "github.com/uber/jaeger-client-go/config" "github.com/uber/jaeger-client-go/zipkin" diff --git a/middlewares/tracing/jaeger/logger.go b/old/middlewares/tracing/jaeger/logger.go similarity index 88% rename from middlewares/tracing/jaeger/logger.go rename to old/middlewares/tracing/jaeger/logger.go index a1b20ddb4..847c1e669 100644 --- a/middlewares/tracing/jaeger/logger.go +++ b/old/middlewares/tracing/jaeger/logger.go @@ -1,6 +1,6 @@ package jaeger -import "github.com/containous/traefik/log" +import "github.com/containous/traefik/old/log" // jaegerLogger is an implementation of the Logger interface that delegates to traefik log type jaegerLogger struct{} diff --git a/old/middlewares/tracing/status_code.go b/old/middlewares/tracing/status_code.go new file mode 100644 index 000000000..ec1802467 --- /dev/null +++ b/old/middlewares/tracing/status_code.go @@ -0,0 +1,57 @@ +package tracing + +import ( + "bufio" + "net" + "net/http" +) + +type statusCodeRecoder interface { + http.ResponseWriter + Status() int +} + +type statusCodeWithoutCloseNotify struct { + http.ResponseWriter + status int +} + +// WriteHeader captures the status code for later retrieval. +func (s *statusCodeWithoutCloseNotify) WriteHeader(status int) { + s.status = status + s.ResponseWriter.WriteHeader(status) +} + +// Status get response status +func (s *statusCodeWithoutCloseNotify) Status() int { + return s.status +} + +// Hijack hijacks the connection +func (s *statusCodeWithoutCloseNotify) Hijack() (net.Conn, *bufio.ReadWriter, error) { + return s.ResponseWriter.(http.Hijacker).Hijack() +} + +// Flush sends any buffered data to the client. +func (s *statusCodeWithoutCloseNotify) Flush() { + if flusher, ok := s.ResponseWriter.(http.Flusher); ok { + flusher.Flush() + } +} + +type statusCodeWithCloseNotify struct { + *statusCodeWithoutCloseNotify +} + +func (s *statusCodeWithCloseNotify) CloseNotify() <-chan bool { + return s.ResponseWriter.(http.CloseNotifier).CloseNotify() +} + +// newStatusCodeRecoder returns an initialized statusCodeRecoder. 
+func newStatusCodeRecoder(rw http.ResponseWriter, status int) statusCodeRecoder { + recorder := &statusCodeWithoutCloseNotify{rw, status} + if _, ok := rw.(http.CloseNotifier); ok { + return &statusCodeWithCloseNotify{recorder} + } + return recorder +} diff --git a/middlewares/tracing/tracing.go b/old/middlewares/tracing/tracing.go similarity index 96% rename from middlewares/tracing/tracing.go rename to old/middlewares/tracing/tracing.go index f52039856..8f3380ec9 100644 --- a/middlewares/tracing/tracing.go +++ b/old/middlewares/tracing/tracing.go @@ -6,10 +6,10 @@ import ( "io" "net/http" - "github.com/containous/traefik/log" - "github.com/containous/traefik/middlewares/tracing/datadog" - "github.com/containous/traefik/middlewares/tracing/jaeger" - "github.com/containous/traefik/middlewares/tracing/zipkin" + "github.com/containous/traefik/old/log" + "github.com/containous/traefik/old/middlewares/tracing/datadog" + "github.com/containous/traefik/old/middlewares/tracing/jaeger" + "github.com/containous/traefik/old/middlewares/tracing/zipkin" "github.com/opentracing/opentracing-go" "github.com/opentracing/opentracing-go/ext" ) diff --git a/middlewares/tracing/tracing_test.go b/old/middlewares/tracing/tracing_test.go similarity index 100% rename from middlewares/tracing/tracing_test.go rename to old/middlewares/tracing/tracing_test.go diff --git a/old/middlewares/tracing/wrapper.go b/old/middlewares/tracing/wrapper.go new file mode 100644 index 000000000..8e9c566c1 --- /dev/null +++ b/old/middlewares/tracing/wrapper.go @@ -0,0 +1,66 @@ +package tracing + +import ( + "net/http" + + "github.com/urfave/negroni" +) + +// NewNegroniHandlerWrapper return a negroni.Handler struct +func (t *Tracing) NewNegroniHandlerWrapper(name string, handler negroni.Handler, clientSpanKind bool) negroni.Handler { + if t.IsEnabled() && handler != nil { + return &NegroniHandlerWrapper{ + name: name, + next: handler, + clientSpanKind: clientSpanKind, + } + } + return handler +} + +// NewHTTPHandlerWrapper return a http.Handler struct +func (t *Tracing) NewHTTPHandlerWrapper(name string, handler http.Handler, clientSpanKind bool) http.Handler { + if t.IsEnabled() && handler != nil { + return &HTTPHandlerWrapper{ + name: name, + handler: handler, + clientSpanKind: clientSpanKind, + } + } + return handler +} + +// NegroniHandlerWrapper is used to wrap negroni handler middleware +type NegroniHandlerWrapper struct { + name string + next negroni.Handler + clientSpanKind bool +} + +func (t *NegroniHandlerWrapper) ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { + var finish func() + _, r, finish = StartSpan(r, t.name, t.clientSpanKind) + defer finish() + + if t.next != nil { + t.next.ServeHTTP(rw, r, next) + } +} + +// HTTPHandlerWrapper is used to wrap http handler middleware +type HTTPHandlerWrapper struct { + name string + handler http.Handler + clientSpanKind bool +} + +func (t *HTTPHandlerWrapper) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + var finish func() + _, r, finish = StartSpan(r, t.name, t.clientSpanKind) + defer finish() + + if t.handler != nil { + t.handler.ServeHTTP(rw, r) + } + +} diff --git a/middlewares/tracing/zipkin/zipkin.go b/old/middlewares/tracing/zipkin/zipkin.go similarity index 97% rename from middlewares/tracing/zipkin/zipkin.go rename to old/middlewares/tracing/zipkin/zipkin.go index 90ed2bd56..79d20b2db 100644 --- a/middlewares/tracing/zipkin/zipkin.go +++ b/old/middlewares/tracing/zipkin/zipkin.go @@ -4,7 +4,7 @@ import ( "io" "time" - 
"github.com/containous/traefik/log" + "github.com/containous/traefik/old/log" "github.com/opentracing/opentracing-go" zipkin "github.com/openzipkin/zipkin-go-opentracing" ) diff --git a/old/ping/ping.go b/old/ping/ping.go new file mode 100644 index 000000000..1e7ffa860 --- /dev/null +++ b/old/ping/ping.go @@ -0,0 +1,36 @@ +package ping + +import ( + "context" + "fmt" + "net/http" + + "github.com/containous/mux" +) + +// Handler expose ping routes +type Handler struct { + EntryPoint string `description:"Ping entryPoint" export:"true"` + terminating bool +} + +// WithContext causes the ping endpoint to serve non 200 responses. +func (h *Handler) WithContext(ctx context.Context) { + go func() { + <-ctx.Done() + h.terminating = true + }() +} + +// AddRoutes add ping routes on a router +func (h *Handler) AddRoutes(router *mux.Router) { + router.Methods(http.MethodGet, http.MethodHead).Path("/ping"). + HandlerFunc(func(response http.ResponseWriter, request *http.Request) { + statusCode := http.StatusOK + if h.terminating { + statusCode = http.StatusServiceUnavailable + } + response.WriteHeader(statusCode) + fmt.Fprint(response, http.StatusText(statusCode)) + }) +} diff --git a/old/provider/acme/account.go b/old/provider/acme/account.go new file mode 100644 index 000000000..8538e49dc --- /dev/null +++ b/old/provider/acme/account.go @@ -0,0 +1,83 @@ +package acme + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + + "github.com/containous/traefik/old/log" + "github.com/xenolf/lego/acme" +) + +// Account is used to store lets encrypt registration info +type Account struct { + Email string + Registration *acme.RegistrationResource + PrivateKey []byte + KeyType acme.KeyType +} + +const ( + // RegistrationURLPathV1Regexp is a regexp which match ACME registration URL in the V1 format + RegistrationURLPathV1Regexp = `^.*/acme/reg/\d+$` +) + +// NewAccount creates an account +func NewAccount(email string, keyTypeValue string) (*Account, error) { + keyType := GetKeyType(keyTypeValue) + + // Create a user. New accounts need an email and private key to start + privateKey, err := rsa.GenerateKey(rand.Reader, 4096) + if err != nil { + return nil, err + } + + return &Account{ + Email: email, + PrivateKey: x509.MarshalPKCS1PrivateKey(privateKey), + KeyType: keyType, + }, nil +} + +// GetEmail returns email +func (a *Account) GetEmail() string { + return a.Email +} + +// GetRegistration returns lets encrypt registration resource +func (a *Account) GetRegistration() *acme.RegistrationResource { + return a.Registration +} + +// GetPrivateKey returns private key +func (a *Account) GetPrivateKey() crypto.PrivateKey { + if privateKey, err := x509.ParsePKCS1PrivateKey(a.PrivateKey); err == nil { + return privateKey + } + + log.Errorf("Cannot unmarshal private key %+v", a.PrivateKey) + return nil +} + +// GetKeyType used to determine which algo to used +func GetKeyType(value string) acme.KeyType { + switch value { + case "EC256": + return acme.EC256 + case "EC384": + return acme.EC384 + case "RSA2048": + return acme.RSA2048 + case "RSA4096": + return acme.RSA4096 + case "RSA8192": + return acme.RSA8192 + case "": + log.Infof("The key type is empty. Use default key type %v.", acme.RSA4096) + return acme.RSA4096 + default: + log.Infof("Unable to determine key type value %q. 
Use default key type %v.", value, acme.RSA4096) + return acme.RSA4096 + } +} diff --git a/old/provider/acme/challenge_http.go b/old/provider/acme/challenge_http.go new file mode 100644 index 000000000..02f576688 --- /dev/null +++ b/old/provider/acme/challenge_http.go @@ -0,0 +1,86 @@ +package acme + +import ( + "net" + "net/http" + "time" + + "github.com/cenk/backoff" + "github.com/containous/mux" + "github.com/containous/traefik/old/log" + "github.com/containous/traefik/safe" + "github.com/xenolf/lego/acme" +) + +var _ acme.ChallengeProviderTimeout = (*challengeHTTP)(nil) + +type challengeHTTP struct { + Store Store +} + +// Present presents a challenge to obtain new ACME certificate +func (c *challengeHTTP) Present(domain, token, keyAuth string) error { + return c.Store.SetHTTPChallengeToken(token, domain, []byte(keyAuth)) +} + +// CleanUp cleans the challenges when certificate is obtained +func (c *challengeHTTP) CleanUp(domain, token, keyAuth string) error { + return c.Store.RemoveHTTPChallengeToken(token, domain) +} + +// Timeout calculates the maximum of time allowed to resolved an ACME challenge +func (c *challengeHTTP) Timeout() (timeout, interval time.Duration) { + return 60 * time.Second, 5 * time.Second +} + +func getTokenValue(token, domain string, store Store) []byte { + log.Debugf("Looking for an existing ACME challenge for token %v...", token) + var result []byte + + operation := func() error { + var err error + result, err = store.GetHTTPChallengeToken(token, domain) + return err + } + + notify := func(err error, time time.Duration) { + log.Errorf("Error getting challenge for token retrying in %s", time) + } + + ebo := backoff.NewExponentialBackOff() + ebo.MaxElapsedTime = 60 * time.Second + err := backoff.RetryNotify(safe.OperationWithRecover(operation), ebo, notify) + if err != nil { + log.Errorf("Error getting challenge for token: %v", err) + return []byte{} + } + + return result +} + +// AddRoutes add routes on internal router +func (p *Provider) AddRoutes(router *mux.Router) { + router.Methods(http.MethodGet). + Path(acme.HTTP01ChallengePath("{token}")). + Handler(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + vars := mux.Vars(req) + if token, ok := vars["token"]; ok { + domain, _, err := net.SplitHostPort(req.Host) + if err != nil { + log.Debugf("Unable to split host and port: %v. 
Fallback to request host.", err) + domain = req.Host + } + + tokenValue := getTokenValue(token, domain, p.Store) + if len(tokenValue) > 0 { + rw.WriteHeader(http.StatusOK) + _, err = rw.Write(tokenValue) + if err != nil { + log.Errorf("Unable to write token : %v", err) + } + return + } + } + rw.WriteHeader(http.StatusNotFound) + })) +} diff --git a/old/provider/acme/challenge_tls.go b/old/provider/acme/challenge_tls.go new file mode 100644 index 000000000..be71fe900 --- /dev/null +++ b/old/provider/acme/challenge_tls.go @@ -0,0 +1,52 @@ +package acme + +import ( + "crypto/tls" + + "github.com/containous/traefik/old/log" + "github.com/containous/traefik/old/types" + "github.com/xenolf/lego/acme" +) + +var _ acme.ChallengeProvider = (*challengeTLSALPN)(nil) + +type challengeTLSALPN struct { + Store Store +} + +func (c *challengeTLSALPN) Present(domain, token, keyAuth string) error { + log.Debugf("TLS Challenge Present temp certificate for %s", domain) + + certPEMBlock, keyPEMBlock, err := acme.TLSALPNChallengeBlocks(domain, keyAuth) + if err != nil { + return err + } + + cert := &Certificate{Certificate: certPEMBlock, Key: keyPEMBlock, Domain: types.Domain{Main: "TEMP-" + domain}} + return c.Store.AddTLSChallenge(domain, cert) +} + +func (c *challengeTLSALPN) CleanUp(domain, token, keyAuth string) error { + log.Debugf("TLS Challenge CleanUp temp certificate for %s", domain) + + return c.Store.RemoveTLSChallenge(domain) +} + +// GetTLSALPNCertificate Get the temp certificate for ACME TLS-ALPN-O1 challenge. +func (p *Provider) GetTLSALPNCertificate(domain string) (*tls.Certificate, error) { + cert, err := p.Store.GetTLSChallenge(domain) + if err != nil { + return nil, err + } + + if cert == nil { + return nil, nil + } + + certificate, err := tls.X509KeyPair(cert.Certificate, cert.Key) + if err != nil { + return nil, err + } + + return &certificate, nil +} diff --git a/old/provider/acme/local_store.go b/old/provider/acme/local_store.go new file mode 100644 index 000000000..405c0f002 --- /dev/null +++ b/old/provider/acme/local_store.go @@ -0,0 +1,251 @@ +package acme + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "regexp" + "sync" + + "github.com/containous/traefik/old/log" + "github.com/containous/traefik/safe" +) + +var _ Store = (*LocalStore)(nil) + +// LocalStore Store implementation for local file +type LocalStore struct { + filename string + storedData *StoredData + SaveDataChan chan *StoredData `json:"-"` + lock sync.RWMutex +} + +// NewLocalStore initializes a new LocalStore with a file name +func NewLocalStore(filename string) *LocalStore { + store := &LocalStore{filename: filename, SaveDataChan: make(chan *StoredData)} + store.listenSaveAction() + return store +} + +func (s *LocalStore) get() (*StoredData, error) { + if s.storedData == nil { + s.storedData = &StoredData{ + HTTPChallenges: make(map[string]map[string][]byte), + TLSChallenges: make(map[string]*Certificate), + } + + hasData, err := CheckFile(s.filename) + if err != nil { + return nil, err + } + + if hasData { + f, err := os.Open(s.filename) + if err != nil { + return nil, err + } + defer f.Close() + + file, err := ioutil.ReadAll(f) + if err != nil { + return nil, err + } + + if len(file) > 0 { + if err := json.Unmarshal(file, s.storedData); err != nil { + return nil, err + } + } + + // Check if ACME Account is in ACME V1 format + if s.storedData.Account != nil && s.storedData.Account.Registration != nil { + isOldRegistration, err := regexp.MatchString(RegistrationURLPathV1Regexp, 
s.storedData.Account.Registration.URI) + if err != nil { + return nil, err + } + if isOldRegistration { + log.Debug("Reset ACME account.") + s.storedData.Account = nil + s.SaveDataChan <- s.storedData + } + } + + // Delete all certificates with no value + var certificates []*Certificate + for _, certificate := range s.storedData.Certificates { + if len(certificate.Certificate) == 0 || len(certificate.Key) == 0 { + log.Debugf("Delete certificate %v for domains %v which have no value.", certificate, certificate.Domain.ToStrArray()) + continue + } + certificates = append(certificates, certificate) + } + + if len(certificates) < len(s.storedData.Certificates) { + s.storedData.Certificates = certificates + s.SaveDataChan <- s.storedData + } + } + } + + return s.storedData, nil +} + +// listenSaveAction listens to a chan to store ACME data in json format into LocalStore.filename +func (s *LocalStore) listenSaveAction() { + safe.Go(func() { + for object := range s.SaveDataChan { + data, err := json.MarshalIndent(object, "", " ") + if err != nil { + log.Error(err) + } + + err = ioutil.WriteFile(s.filename, data, 0600) + if err != nil { + log.Error(err) + } + } + }) +} + +// GetAccount returns ACME Account +func (s *LocalStore) GetAccount() (*Account, error) { + storedData, err := s.get() + if err != nil { + return nil, err + } + + return storedData.Account, nil +} + +// SaveAccount stores ACME Account +func (s *LocalStore) SaveAccount(account *Account) error { + storedData, err := s.get() + if err != nil { + return err + } + + storedData.Account = account + s.SaveDataChan <- storedData + + return nil +} + +// GetCertificates returns ACME Certificates list +func (s *LocalStore) GetCertificates() ([]*Certificate, error) { + storedData, err := s.get() + if err != nil { + return nil, err + } + + return storedData.Certificates, nil +} + +// SaveCertificates stores ACME Certificates list +func (s *LocalStore) SaveCertificates(certificates []*Certificate) error { + storedData, err := s.get() + if err != nil { + return err + } + + storedData.Certificates = certificates + s.SaveDataChan <- storedData + + return nil +} + +// GetHTTPChallengeToken Get the http challenge token from the store +func (s *LocalStore) GetHTTPChallengeToken(token, domain string) ([]byte, error) { + s.lock.RLock() + defer s.lock.RUnlock() + + if s.storedData.HTTPChallenges == nil { + s.storedData.HTTPChallenges = map[string]map[string][]byte{} + } + + if _, ok := s.storedData.HTTPChallenges[token]; !ok { + return nil, fmt.Errorf("cannot find challenge for token %v", token) + } + + result, ok := s.storedData.HTTPChallenges[token][domain] + if !ok { + return nil, fmt.Errorf("cannot find challenge for token %v", token) + } + return result, nil +} + +// SetHTTPChallengeToken Set the http challenge token in the store +func (s *LocalStore) SetHTTPChallengeToken(token, domain string, keyAuth []byte) error { + s.lock.Lock() + defer s.lock.Unlock() + + if s.storedData.HTTPChallenges == nil { + s.storedData.HTTPChallenges = map[string]map[string][]byte{} + } + + if _, ok := s.storedData.HTTPChallenges[token]; !ok { + s.storedData.HTTPChallenges[token] = map[string][]byte{} + } + + s.storedData.HTTPChallenges[token][domain] = keyAuth + return nil +} + +// RemoveHTTPChallengeToken Remove the http challenge token in the store +func (s *LocalStore) RemoveHTTPChallengeToken(token, domain string) error { + s.lock.Lock() + defer s.lock.Unlock() + + if s.storedData.HTTPChallenges == nil { + return nil + } + + if _, ok := 
s.storedData.HTTPChallenges[token]; ok { + if _, domainOk := s.storedData.HTTPChallenges[token][domain]; domainOk { + delete(s.storedData.HTTPChallenges[token], domain) + } + if len(s.storedData.HTTPChallenges[token]) == 0 { + delete(s.storedData.HTTPChallenges, token) + } + } + return nil +} + +// AddTLSChallenge Add a certificate to the ACME TLS-ALPN-01 certificates storage +func (s *LocalStore) AddTLSChallenge(domain string, cert *Certificate) error { + s.lock.Lock() + defer s.lock.Unlock() + + if s.storedData.TLSChallenges == nil { + s.storedData.TLSChallenges = make(map[string]*Certificate) + } + + s.storedData.TLSChallenges[domain] = cert + return nil +} + +// GetTLSChallenge Get a certificate from the ACME TLS-ALPN-01 certificates storage +func (s *LocalStore) GetTLSChallenge(domain string) (*Certificate, error) { + s.lock.Lock() + defer s.lock.Unlock() + + if s.storedData.TLSChallenges == nil { + s.storedData.TLSChallenges = make(map[string]*Certificate) + } + + return s.storedData.TLSChallenges[domain], nil +} + +// RemoveTLSChallenge Remove a certificate from the ACME TLS-ALPN-01 certificates storage +func (s *LocalStore) RemoveTLSChallenge(domain string) error { + s.lock.Lock() + defer s.lock.Unlock() + + if s.storedData.TLSChallenges == nil { + return nil + } + + delete(s.storedData.TLSChallenges, domain) + return nil +} diff --git a/old/provider/acme/local_store_unix.go b/old/provider/acme/local_store_unix.go new file mode 100644 index 000000000..0dbb787be --- /dev/null +++ b/old/provider/acme/local_store_unix.go @@ -0,0 +1,35 @@ +// +build !windows + +package acme + +import ( + "fmt" + "os" +) + +// CheckFile checks file permissions and content size +func CheckFile(name string) (bool, error) { + f, err := os.Open(name) + if err != nil { + if os.IsNotExist(err) { + f, err = os.Create(name) + if err != nil { + return false, err + } + return false, f.Chmod(0600) + } + return false, err + } + defer f.Close() + + fi, err := f.Stat() + if err != nil { + return false, err + } + + if fi.Mode().Perm()&0077 != 0 { + return false, fmt.Errorf("permissions %o for %s are too open, please use 600", fi.Mode().Perm(), name) + } + + return fi.Size() > 0, nil +} diff --git a/old/provider/acme/local_store_windows.go b/old/provider/acme/local_store_windows.go new file mode 100644 index 000000000..1804578a3 --- /dev/null +++ b/old/provider/acme/local_store_windows.go @@ -0,0 +1,27 @@ +package acme + +import "os" + +// CheckFile checks file content size +// Do not check file permissions on Windows right now +func CheckFile(name string) (bool, error) { + f, err := os.Open(name) + if err != nil { + if os.IsNotExist(err) { + f, err = os.Create(name) + if err != nil { + return false, err + } + return false, f.Chmod(0600) + } + return false, err + } + defer f.Close() + + fi, err := f.Stat() + if err != nil { + return false, err + } + + return fi.Size() > 0, nil +} diff --git a/old/provider/acme/provider.go b/old/provider/acme/provider.go new file mode 100644 index 000000000..cfc45f337 --- /dev/null +++ b/old/provider/acme/provider.go @@ -0,0 +1,826 @@ +package acme + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + fmtlog "log" + "net" + "net/url" + "reflect" + "strings" + "sync" + "time" + + "github.com/cenk/backoff" + "github.com/containous/flaeg/parse" + "github.com/containous/traefik/old/log" + "github.com/containous/traefik/old/types" + "github.com/containous/traefik/rules" + "github.com/containous/traefik/safe" + traefiktls "github.com/containous/traefik/tls" + 
"github.com/containous/traefik/version" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/xenolf/lego/acme" + legolog "github.com/xenolf/lego/log" + "github.com/xenolf/lego/providers/dns" +) + +var ( + // OSCPMustStaple enables OSCP stapling as from https://github.com/xenolf/lego/issues/270 + OSCPMustStaple = false +) + +// Configuration holds ACME configuration provided by users +type Configuration struct { + Email string `description:"Email address used for registration"` + ACMELogging bool `description:"Enable debug logging of ACME actions."` + CAServer string `description:"CA server to use."` + Storage string `description:"Storage to use."` + EntryPoint string `description:"EntryPoint to use."` + KeyType string `description:"KeyType used for generating certificate private key. Allow value 'EC256', 'EC384', 'RSA2048', 'RSA4096', 'RSA8192'. Default to 'RSA4096'"` + OnHostRule bool `description:"Enable certificate generation on frontends Host rules."` + OnDemand bool `description:"Enable on demand certificate generation. This will request a certificate from Let's Encrypt during the first TLS handshake for a hostname that does not yet have a certificate."` // Deprecated + DNSChallenge *DNSChallenge `description:"Activate DNS-01 Challenge"` + HTTPChallenge *HTTPChallenge `description:"Activate HTTP-01 Challenge"` + TLSChallenge *TLSChallenge `description:"Activate TLS-ALPN-01 Challenge"` + Domains []types.Domain `description:"CN and SANs (alternative domains) to each main domain using format: --acme.domains='main.com,san1.com,san2.com' --acme.domains='*.main.net'. No SANs for wildcards domain. Wildcard domains only accepted with DNSChallenge"` +} + +// Provider holds configurations of the provider. +type Provider struct { + *Configuration + Store Store + certificates []*Certificate + account *Account + client *acme.Client + certsChan chan *Certificate + configurationChan chan<- types.ConfigMessage + certificateStore *traefiktls.CertificateStore + clientMutex sync.Mutex + configFromListenerChan chan types.Configuration + pool *safe.Pool + resolvingDomains map[string]struct{} + resolvingDomainsMutex sync.RWMutex +} + +// Certificate is a struct which contains all data needed from an ACME certificate +type Certificate struct { + Domain types.Domain + Certificate []byte + Key []byte +} + +// DNSChallenge contains DNS challenge Configuration +type DNSChallenge struct { + Provider string `description:"Use a DNS-01 based challenge provider rather than HTTPS."` + DelayBeforeCheck parse.Duration `description:"Assume DNS propagates after a delay in seconds rather than finding and querying nameservers."` + Resolvers types.DNSResolvers `description:"Use following DNS servers to resolve the FQDN authority."` + DisablePropagationCheck bool `description:"Disable the DNS propagation checks before notifying ACME that the DNS challenge is ready. 
[not recommended]"` + preCheckTimeout time.Duration + preCheckInterval time.Duration +} + +// HTTPChallenge contains HTTP challenge Configuration +type HTTPChallenge struct { + EntryPoint string `description:"HTTP challenge EntryPoint"` +} + +// TLSChallenge contains TLS challenge Configuration +type TLSChallenge struct{} + +// SetConfigListenerChan initializes the configFromListenerChan +func (p *Provider) SetConfigListenerChan(configFromListenerChan chan types.Configuration) { + p.configFromListenerChan = configFromListenerChan +} + +// SetCertificateStore allow to initialize certificate store +func (p *Provider) SetCertificateStore(certificateStore *traefiktls.CertificateStore) { + p.certificateStore = certificateStore +} + +// ListenConfiguration sets a new Configuration into the configFromListenerChan +func (p *Provider) ListenConfiguration(config types.Configuration) { + p.configFromListenerChan <- config +} + +// ListenRequest resolves new certificates for a domain from an incoming request and return a valid Certificate to serve (onDemand option) +func (p *Provider) ListenRequest(domain string) (*tls.Certificate, error) { + acmeCert, err := p.resolveCertificate(types.Domain{Main: domain}, false) + if acmeCert == nil || err != nil { + return nil, err + } + + certificate, err := tls.X509KeyPair(acmeCert.Certificate, acmeCert.PrivateKey) + + return &certificate, err +} + +// Init for compatibility reason the BaseProvider implements an empty Init +func (p *Provider) Init(_ types.Constraints) error { + acme.UserAgent = fmt.Sprintf("containous-traefik/%s", version.Version) + if p.ACMELogging { + legolog.Logger = fmtlog.New(log.WriterLevel(logrus.InfoLevel), "legolog: ", 0) + } else { + legolog.Logger = fmtlog.New(ioutil.Discard, "", 0) + } + + if p.Store == nil { + return errors.New("no store found for the ACME provider") + } + + var err error + p.account, err = p.Store.GetAccount() + if err != nil { + return fmt.Errorf("unable to get ACME account : %v", err) + } + + // Reset Account if caServer changed, thus registration URI can be updated + if p.account != nil && p.account.Registration != nil && !isAccountMatchingCaServer(p.account.Registration.URI, p.CAServer) { + log.Info("Account URI does not match the current CAServer. The account will be reset") + p.account = nil + } + + p.certificates, err = p.Store.GetCertificates() + if err != nil { + return fmt.Errorf("unable to get ACME certificates : %v", err) + } + + // Init the currently resolved domain map + p.resolvingDomains = make(map[string]struct{}) + + return nil +} + +func isAccountMatchingCaServer(accountURI string, serverURI string) bool { + aru, err := url.Parse(accountURI) + if err != nil { + log.Infof("Unable to parse account.Registration URL : %v", err) + return false + } + cau, err := url.Parse(serverURI) + if err != nil { + log.Infof("Unable to parse CAServer URL : %v", err) + return false + } + return cau.Hostname() == aru.Hostname() +} + +// Provide allows the file provider to provide configurations to traefik +// using the given Configuration channel. 
+func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error { + p.pool = pool + + p.watchCertificate() + p.watchNewDomains() + + p.configurationChan = configurationChan + p.refreshCertificates() + + p.deleteUnnecessaryDomains() + for i := 0; i < len(p.Domains); i++ { + domain := p.Domains[i] + safe.Go(func() { + if _, err := p.resolveCertificate(domain, true); err != nil { + log.Errorf("Unable to obtain ACME certificate for domains %q : %v", strings.Join(domain.ToStrArray(), ","), err) + } + }) + } + + p.renewCertificates() + + ticker := time.NewTicker(24 * time.Hour) + pool.Go(func(stop chan bool) { + for { + select { + case <-ticker.C: + p.renewCertificates() + case <-stop: + ticker.Stop() + return + } + } + }) + + return nil +} + +func (p *Provider) getClient() (*acme.Client, error) { + p.clientMutex.Lock() + defer p.clientMutex.Unlock() + + if p.client != nil { + return p.client, nil + } + + account, err := p.initAccount() + if err != nil { + return nil, err + } + + log.Debug("Building ACME client...") + + caServer := "https://acme-v02.api.letsencrypt.org/directory" + if len(p.CAServer) > 0 { + caServer = p.CAServer + } + log.Debug(caServer) + + client, err := acme.NewClient(caServer, account, account.KeyType) + if err != nil { + return nil, err + } + + // New users will need to register; be sure to save it + if account.GetRegistration() == nil { + log.Info("Register...") + + reg, err := client.Register(true) + if err != nil { + return nil, err + } + + account.Registration = reg + } + + // Save the account once before all the certificates generation/storing + // No certificate can be generated if account is not initialized + err = p.Store.SaveAccount(account) + if err != nil { + return nil, err + } + + if p.DNSChallenge != nil && len(p.DNSChallenge.Provider) > 0 { + log.Debugf("Using DNS Challenge provider: %s", p.DNSChallenge.Provider) + + SetRecursiveNameServers(p.DNSChallenge.Resolvers) + SetPropagationCheck(p.DNSChallenge.DisablePropagationCheck) + + err = dnsOverrideDelay(p.DNSChallenge.DelayBeforeCheck) + if err != nil { + return nil, err + } + + var provider acme.ChallengeProvider + provider, err = dns.NewDNSChallengeProviderByName(p.DNSChallenge.Provider) + if err != nil { + return nil, err + } + + client.ExcludeChallenges([]acme.Challenge{acme.HTTP01, acme.TLSALPN01}) + + err = client.SetChallengeProvider(acme.DNS01, provider) + if err != nil { + return nil, err + } + + // Same default values than LEGO + p.DNSChallenge.preCheckTimeout = 60 * time.Second + p.DNSChallenge.preCheckInterval = 2 * time.Second + + // Set the precheck timeout into the DNSChallenge provider + if challengeProviderTimeout, ok := provider.(acme.ChallengeProviderTimeout); ok { + p.DNSChallenge.preCheckTimeout, p.DNSChallenge.preCheckInterval = challengeProviderTimeout.Timeout() + } + + } else if p.HTTPChallenge != nil && len(p.HTTPChallenge.EntryPoint) > 0 { + log.Debug("Using HTTP Challenge provider.") + + client.ExcludeChallenges([]acme.Challenge{acme.DNS01, acme.TLSALPN01}) + + err = client.SetChallengeProvider(acme.HTTP01, &challengeHTTP{Store: p.Store}) + if err != nil { + return nil, err + } + } else if p.TLSChallenge != nil { + log.Debug("Using TLS Challenge provider.") + + client.ExcludeChallenges([]acme.Challenge{acme.HTTP01, acme.DNS01}) + + err = client.SetChallengeProvider(acme.TLSALPN01, &challengeTLSALPN{Store: p.Store}) + if err != nil { + return nil, err + } + } else { + return nil, errors.New("ACME challenge not specified, please select TLS or HTTP or 
DNS Challenge") + } + + p.client = client + return p.client, nil +} + +func (p *Provider) initAccount() (*Account, error) { + if p.account == nil || len(p.account.Email) == 0 { + var err error + p.account, err = NewAccount(p.Email, p.KeyType) + if err != nil { + return nil, err + } + } + + // Set the KeyType if not already defined in the account + if len(p.account.KeyType) == 0 { + p.account.KeyType = GetKeyType(p.KeyType) + } + + return p.account, nil +} + +func contains(entryPoints []string, acmeEntryPoint string) bool { + for _, entryPoint := range entryPoints { + if entryPoint == acmeEntryPoint { + return true + } + } + return false +} + +func (p *Provider) watchNewDomains() { + p.pool.Go(func(stop chan bool) { + for { + select { + case config := <-p.configFromListenerChan: + for _, frontend := range config.Frontends { + if !contains(frontend.EntryPoints, p.EntryPoint) { + continue + } + for _, route := range frontend.Routes { + domainRules := rules.Rules{} + domains, err := domainRules.ParseDomains(route.Rule) + if err != nil { + log.Errorf("Error parsing domains in provider ACME: %v", err) + continue + } + + if len(domains) == 0 { + log.Debugf("No domain parsed in rule %q in provider ACME", route.Rule) + continue + } + + log.Debugf("Try to challenge certificate for domain %v founded in Host rule", domains) + + var domain types.Domain + if len(domains) > 0 { + domain = types.Domain{Main: domains[0]} + if len(domains) > 1 { + domain.SANs = domains[1:] + } + + safe.Go(func() { + if _, err := p.resolveCertificate(domain, false); err != nil { + log.Errorf("Unable to obtain ACME certificate for domains %q detected thanks to rule %q : %v", strings.Join(domains, ","), route.Rule, err) + } + }) + } + } + } + case <-stop: + return + } + } + }) +} + +func (p *Provider) resolveCertificate(domain types.Domain, domainFromConfigurationFile bool) (*acme.CertificateResource, error) { + domains, err := p.getValidDomains(domain, domainFromConfigurationFile) + if err != nil { + return nil, err + } + + // Check provided certificates + uncheckedDomains := p.getUncheckedDomains(domains, !domainFromConfigurationFile) + if len(uncheckedDomains) == 0 { + return nil, nil + } + + p.addResolvingDomains(uncheckedDomains) + defer p.removeResolvingDomains(uncheckedDomains) + + log.Debugf("Loading ACME certificates %+v...", uncheckedDomains) + + client, err := p.getClient() + if err != nil { + return nil, fmt.Errorf("cannot get ACME client %v", err) + } + + var certificate *acme.CertificateResource + bundle := true + if p.useCertificateWithRetry(uncheckedDomains) { + certificate, err = obtainCertificateWithRetry(domains, client, p.DNSChallenge.preCheckTimeout, p.DNSChallenge.preCheckInterval, bundle) + } else { + certificate, err = client.ObtainCertificate(domains, bundle, nil, OSCPMustStaple) + } + + if err != nil { + return nil, fmt.Errorf("unable to generate a certificate for the domains %v: %v", uncheckedDomains, err) + } + if certificate == nil { + return nil, fmt.Errorf("domains %v do not generate a certificate", uncheckedDomains) + } + if len(certificate.Certificate) == 0 || len(certificate.PrivateKey) == 0 { + return nil, fmt.Errorf("domains %v generate certificate with no value: %v", uncheckedDomains, certificate) + } + + log.Debugf("Certificates obtained for domains %+v", uncheckedDomains) + + if len(uncheckedDomains) > 1 { + domain = types.Domain{Main: uncheckedDomains[0], SANs: uncheckedDomains[1:]} + } else { + domain = types.Domain{Main: uncheckedDomains[0]} + } + p.addCertificateForDomain(domain, 
certificate.Certificate, certificate.PrivateKey) + + return certificate, nil +} + +func (p *Provider) removeResolvingDomains(resolvingDomains []string) { + p.resolvingDomainsMutex.Lock() + defer p.resolvingDomainsMutex.Unlock() + + for _, domain := range resolvingDomains { + delete(p.resolvingDomains, domain) + } +} + +func (p *Provider) addResolvingDomains(resolvingDomains []string) { + p.resolvingDomainsMutex.Lock() + defer p.resolvingDomainsMutex.Unlock() + + for _, domain := range resolvingDomains { + p.resolvingDomains[domain] = struct{}{} + } +} + +func (p *Provider) useCertificateWithRetry(domains []string) bool { + // Check if we can use the retry mechanism only if we use the DNS Challenge and if is there are at least 2 domains to check + if p.DNSChallenge != nil && len(domains) > 1 { + rootDomain := "" + for _, searchWildcardDomain := range domains { + // Search a wildcard domain if not already found + if len(rootDomain) == 0 && strings.HasPrefix(searchWildcardDomain, "*.") { + rootDomain = strings.TrimPrefix(searchWildcardDomain, "*.") + if len(rootDomain) > 0 { + // Look for a root domain which matches the wildcard domain + for _, searchRootDomain := range domains { + if rootDomain == searchRootDomain { + // If the domains list contains a wildcard domain and its root domain, we can use the retry mechanism to obtain the certificate + return true + } + } + } + // There is only one wildcard domain in the slice, if its root domain has not been found, the retry mechanism does not have to be used + return false + } + } + } + + return false +} + +func obtainCertificateWithRetry(domains []string, client *acme.Client, timeout, interval time.Duration, bundle bool) (*acme.CertificateResource, error) { + var certificate *acme.CertificateResource + var err error + + operation := func() error { + certificate, err = client.ObtainCertificate(domains, bundle, nil, OSCPMustStaple) + return err + } + + notify := func(err error, time time.Duration) { + log.Errorf("Error obtaining certificate retrying in %s", time) + } + + // Define a retry backOff to let LEGO tries twice to obtain a certificate for both wildcard and root domain + ebo := backoff.NewExponentialBackOff() + ebo.MaxElapsedTime = 2 * timeout + ebo.MaxInterval = interval + rbo := backoff.WithMaxRetries(ebo, 2) + + err = backoff.RetryNotify(safe.OperationWithRecover(operation), rbo, notify) + if err != nil { + log.Errorf("Error obtaining certificate: %v", err) + return nil, err + } + + return certificate, nil +} + +func dnsOverrideDelay(delay parse.Duration) error { + if delay == 0 { + return nil + } + + if delay > 0 { + log.Debugf("Delaying %d rather than validating DNS propagation now.", delay) + + acme.PreCheckDNS = func(_, _ string) (bool, error) { + time.Sleep(time.Duration(delay)) + return true, nil + } + } else { + return fmt.Errorf("delayBeforeCheck: %d cannot be less than 0", delay) + } + return nil +} + +func (p *Provider) addCertificateForDomain(domain types.Domain, certificate []byte, key []byte) { + p.certsChan <- &Certificate{Certificate: certificate, Key: key, Domain: domain} +} + +// deleteUnnecessaryDomains deletes from the configuration : +// - Duplicated domains +// - Domains which are checked by wildcard domain +func (p *Provider) deleteUnnecessaryDomains() { + var newDomains []types.Domain + + for idxDomainToCheck, domainToCheck := range p.Domains { + keepDomain := true + + for idxDomain, domain := range p.Domains { + if idxDomainToCheck == idxDomain { + continue + } + + if reflect.DeepEqual(domain, domainToCheck) { + 
if idxDomainToCheck > idxDomain { + log.Warnf("The domain %v is duplicated in the configuration but will be process by ACME provider only once.", domainToCheck) + keepDomain = false + } + break + } + + // Check if CN or SANS to check already exists + // or can not be checked by a wildcard + var newDomainsToCheck []string + for _, domainProcessed := range domainToCheck.ToStrArray() { + if idxDomain < idxDomainToCheck && isDomainAlreadyChecked(domainProcessed, domain.ToStrArray()) { + // The domain is duplicated in a CN + log.Warnf("Domain %q is duplicated in the configuration or validated by the domain %v. It will be processed once.", domainProcessed, domain) + continue + } else if domain.Main != domainProcessed && strings.HasPrefix(domain.Main, "*") && isDomainAlreadyChecked(domainProcessed, []string{domain.Main}) { + // Check if a wildcard can validate the domain + log.Warnf("Domain %q will not be processed by ACME provider because it is validated by the wildcard %q", domainProcessed, domain.Main) + continue + } + newDomainsToCheck = append(newDomainsToCheck, domainProcessed) + } + + // Delete the domain if both Main and SANs can be validated by the wildcard domain + // otherwise keep the unchecked values + if newDomainsToCheck == nil { + keepDomain = false + break + } + domainToCheck.Set(newDomainsToCheck) + } + + if keepDomain { + newDomains = append(newDomains, domainToCheck) + } + } + + p.Domains = newDomains +} + +func (p *Provider) watchCertificate() { + p.certsChan = make(chan *Certificate) + p.pool.Go(func(stop chan bool) { + for { + select { + case cert := <-p.certsChan: + certUpdated := false + for _, domainsCertificate := range p.certificates { + if reflect.DeepEqual(cert.Domain, domainsCertificate.Domain) { + domainsCertificate.Certificate = cert.Certificate + domainsCertificate.Key = cert.Key + certUpdated = true + break + } + } + if !certUpdated { + p.certificates = append(p.certificates, cert) + } + + err := p.saveCertificates() + if err != nil { + log.Error(err) + } + + case <-stop: + return + } + } + }) +} + +func (p *Provider) saveCertificates() error { + err := p.Store.SaveCertificates(p.certificates) + + p.refreshCertificates() + + return err +} + +func (p *Provider) refreshCertificates() { + config := types.ConfigMessage{ + ProviderName: "ACME", + Configuration: &types.Configuration{ + Backends: map[string]*types.Backend{}, + Frontends: map[string]*types.Frontend{}, + TLS: []*traefiktls.Configuration{}, + }, + } + + for _, cert := range p.certificates { + certificate := &traefiktls.Certificate{CertFile: traefiktls.FileOrContent(cert.Certificate), KeyFile: traefiktls.FileOrContent(cert.Key)} + config.Configuration.TLS = append(config.Configuration.TLS, &traefiktls.Configuration{Certificate: certificate, EntryPoints: []string{p.EntryPoint}}) + } + p.configurationChan <- config +} + +func (p *Provider) renewCertificates() { + log.Info("Testing certificate renew...") + for _, certificate := range p.certificates { + crt, err := getX509Certificate(certificate) + // If there's an error, we assume the cert is broken, and needs update + // <= 30 days left, renew certificate + if err != nil || crt == nil || crt.NotAfter.Before(time.Now().Add(24*30*time.Hour)) { + client, err := p.getClient() + if err != nil { + log.Infof("Error renewing certificate from LE : %+v, %v", certificate.Domain, err) + continue + } + + log.Infof("Renewing certificate from LE : %+v", certificate.Domain) + + renewedCert, err := client.RenewCertificate(acme.CertificateResource{ + Domain: 
certificate.Domain.Main, + PrivateKey: certificate.Key, + Certificate: certificate.Certificate, + }, true, OSCPMustStaple) + + if err != nil { + log.Errorf("Error renewing certificate from LE: %v, %v", certificate.Domain, err) + continue + } + + if len(renewedCert.Certificate) == 0 || len(renewedCert.PrivateKey) == 0 { + log.Errorf("domains %v renew certificate with no value: %v", certificate.Domain.ToStrArray(), certificate) + continue + } + + p.addCertificateForDomain(certificate.Domain, renewedCert.Certificate, renewedCert.PrivateKey) + } + } +} + +// Get provided certificate which check a domains list (Main and SANs) +// from static and dynamic provided certificates +func (p *Provider) getUncheckedDomains(domainsToCheck []string, checkConfigurationDomains bool) []string { + p.resolvingDomainsMutex.RLock() + defer p.resolvingDomainsMutex.RUnlock() + + log.Debugf("Looking for provided certificate(s) to validate %q...", domainsToCheck) + + allDomains := p.certificateStore.GetAllDomains() + + // Get ACME certificates + for _, certificate := range p.certificates { + allDomains = append(allDomains, strings.Join(certificate.Domain.ToStrArray(), ",")) + } + + // Get currently resolved domains + for domain := range p.resolvingDomains { + allDomains = append(allDomains, domain) + } + + // Get Configuration Domains + if checkConfigurationDomains { + for i := 0; i < len(p.Domains); i++ { + allDomains = append(allDomains, strings.Join(p.Domains[i].ToStrArray(), ",")) + } + } + + return searchUncheckedDomains(domainsToCheck, allDomains) +} + +func searchUncheckedDomains(domainsToCheck []string, existentDomains []string) []string { + var uncheckedDomains []string + for _, domainToCheck := range domainsToCheck { + if !isDomainAlreadyChecked(domainToCheck, existentDomains) { + uncheckedDomains = append(uncheckedDomains, domainToCheck) + } + } + + if len(uncheckedDomains) == 0 { + log.Debugf("No ACME certificate generation required for domains %q.", domainsToCheck) + } else { + log.Debugf("Domains %q need ACME certificates generation for domains %q.", domainsToCheck, strings.Join(uncheckedDomains, ",")) + } + return uncheckedDomains +} + +func getX509Certificate(certificate *Certificate) (*x509.Certificate, error) { + tlsCert, err := tls.X509KeyPair(certificate.Certificate, certificate.Key) + if err != nil { + log.Errorf("Failed to load TLS keypair from ACME certificate for domain %q (SAN : %q), certificate will be renewed : %v", certificate.Domain.Main, strings.Join(certificate.Domain.SANs, ","), err) + return nil, err + } + + crt := tlsCert.Leaf + if crt == nil { + crt, err = x509.ParseCertificate(tlsCert.Certificate[0]) + if err != nil { + log.Errorf("Failed to parse TLS keypair from ACME certificate for domain %q (SAN : %q), certificate will be renewed : %v", certificate.Domain.Main, strings.Join(certificate.Domain.SANs, ","), err) + } + } + + return crt, err +} + +// getValidDomains checks if given domain is allowed to generate a ACME certificate and return it +func (p *Provider) getValidDomains(domain types.Domain, wildcardAllowed bool) ([]string, error) { + domains := domain.ToStrArray() + if len(domains) == 0 { + return nil, errors.New("unable to generate a certificate in ACME provider when no domain is given") + } + + if strings.HasPrefix(domain.Main, "*") { + if !wildcardAllowed { + return nil, fmt.Errorf("unable to generate a wildcard certificate in ACME provider for domain %q from a 'Host' rule", strings.Join(domains, ",")) + } + + if p.DNSChallenge == nil { + return nil, fmt.Errorf("unable 
to generate a wildcard certificate in ACME provider for domain %q : ACME needs a DNSChallenge", strings.Join(domains, ",")) + } + + if strings.HasPrefix(domain.Main, "*.*") { + return nil, fmt.Errorf("unable to generate a wildcard certificate in ACME provider for domain %q : ACME does not allow '*.*' wildcard domain", strings.Join(domains, ",")) + } + } + + for _, san := range domain.SANs { + if strings.HasPrefix(san, "*") { + return nil, fmt.Errorf("unable to generate a certificate in ACME provider for domains %q: SAN %q can not be a wildcard domain", strings.Join(domains, ","), san) + } + } + + var cleanDomains []string + for _, domain := range domains { + canonicalDomain := types.CanonicalDomain(domain) + cleanDomain := acme.UnFqdn(canonicalDomain) + if canonicalDomain != cleanDomain { + log.Warnf("FQDN detected, please remove the trailing dot: %s", canonicalDomain) + } + cleanDomains = append(cleanDomains, cleanDomain) + } + + return cleanDomains, nil +} + +func isDomainAlreadyChecked(domainToCheck string, existentDomains []string) bool { + for _, certDomains := range existentDomains { + for _, certDomain := range strings.Split(certDomains, ",") { + if types.MatchDomain(domainToCheck, certDomain) { + return true + } + } + } + return false +} + +// SetPropagationCheck to disable the Lego PreCheck. +func SetPropagationCheck(disable bool) { + if disable { + acme.PreCheckDNS = func(_, _ string) (bool, error) { + return true, nil + } + } +} + +// SetRecursiveNameServers to provide a custom DNS resolver. +func SetRecursiveNameServers(dnsResolvers []string) { + resolvers := normaliseDNSResolvers(dnsResolvers) + if len(resolvers) > 0 { + acme.RecursiveNameservers = resolvers + log.Infof("Validating FQDN authority with DNS using %+v", resolvers) + } +} + +// ensure all servers have a port number +func normaliseDNSResolvers(dnsResolvers []string) []string { + var normalisedResolvers []string + for _, server := range dnsResolvers { + srv := strings.TrimSpace(server) + if len(srv) > 0 { + if host, port, err := net.SplitHostPort(srv); err != nil { + normalisedResolvers = append(normalisedResolvers, net.JoinHostPort(srv, "53")) + } else { + normalisedResolvers = append(normalisedResolvers, net.JoinHostPort(host, port)) + } + } + } + return normalisedResolvers +} diff --git a/old/provider/acme/provider_test.go b/old/provider/acme/provider_test.go new file mode 100644 index 000000000..591726b90 --- /dev/null +++ b/old/provider/acme/provider_test.go @@ -0,0 +1,684 @@ +package acme + +import ( + "crypto/tls" + "testing" + + "github.com/containous/traefik/old/types" + "github.com/containous/traefik/safe" + traefiktls "github.com/containous/traefik/tls" + "github.com/stretchr/testify/assert" + "github.com/xenolf/lego/acme" +) + +func TestGetUncheckedCertificates(t *testing.T) { + wildcardMap := make(map[string]*tls.Certificate) + wildcardMap["*.traefik.wtf"] = &tls.Certificate{} + + wildcardSafe := &safe.Safe{} + wildcardSafe.Set(wildcardMap) + + domainMap := make(map[string]*tls.Certificate) + domainMap["traefik.wtf"] = &tls.Certificate{} + + domainSafe := &safe.Safe{} + domainSafe.Set(domainMap) + + testCases := []struct { + desc string + dynamicCerts *safe.Safe + staticCerts *safe.Safe + resolvingDomains map[string]struct{} + acmeCertificates []*Certificate + domains []string + expectedDomains []string + }{ + { + desc: "wildcard to generate", + domains: []string{"*.traefik.wtf"}, + expectedDomains: []string{"*.traefik.wtf"}, + }, + { + desc: "wildcard already exists in dynamic certificates", + domains: 
[]string{"*.traefik.wtf"}, + dynamicCerts: wildcardSafe, + expectedDomains: nil, + }, + { + desc: "wildcard already exists in static certificates", + domains: []string{"*.traefik.wtf"}, + staticCerts: wildcardSafe, + expectedDomains: nil, + }, + { + desc: "wildcard already exists in ACME certificates", + domains: []string{"*.traefik.wtf"}, + acmeCertificates: []*Certificate{ + { + Domain: types.Domain{Main: "*.traefik.wtf"}, + }, + }, + expectedDomains: nil, + }, + { + desc: "domain CN and SANs to generate", + domains: []string{"traefik.wtf", "foo.traefik.wtf"}, + expectedDomains: []string{"traefik.wtf", "foo.traefik.wtf"}, + }, + { + desc: "domain CN already exists in dynamic certificates and SANs to generate", + domains: []string{"traefik.wtf", "foo.traefik.wtf"}, + dynamicCerts: domainSafe, + expectedDomains: []string{"foo.traefik.wtf"}, + }, + { + desc: "domain CN already exists in static certificates and SANs to generate", + domains: []string{"traefik.wtf", "foo.traefik.wtf"}, + staticCerts: domainSafe, + expectedDomains: []string{"foo.traefik.wtf"}, + }, + { + desc: "domain CN already exists in ACME certificates and SANs to generate", + domains: []string{"traefik.wtf", "foo.traefik.wtf"}, + acmeCertificates: []*Certificate{ + { + Domain: types.Domain{Main: "traefik.wtf"}, + }, + }, + expectedDomains: []string{"foo.traefik.wtf"}, + }, + { + desc: "domain already exists in dynamic certificates", + domains: []string{"traefik.wtf"}, + dynamicCerts: domainSafe, + expectedDomains: nil, + }, + { + desc: "domain already exists in static certificates", + domains: []string{"traefik.wtf"}, + staticCerts: domainSafe, + expectedDomains: nil, + }, + { + desc: "domain already exists in ACME certificates", + domains: []string{"traefik.wtf"}, + acmeCertificates: []*Certificate{ + { + Domain: types.Domain{Main: "traefik.wtf"}, + }, + }, + expectedDomains: nil, + }, + { + desc: "domain matched by wildcard in dynamic certificates", + domains: []string{"who.traefik.wtf", "foo.traefik.wtf"}, + dynamicCerts: wildcardSafe, + expectedDomains: nil, + }, + { + desc: "domain matched by wildcard in static certificates", + domains: []string{"who.traefik.wtf", "foo.traefik.wtf"}, + staticCerts: wildcardSafe, + expectedDomains: nil, + }, + { + desc: "domain matched by wildcard in ACME certificates", + domains: []string{"who.traefik.wtf", "foo.traefik.wtf"}, + acmeCertificates: []*Certificate{ + { + Domain: types.Domain{Main: "*.traefik.wtf"}, + }, + }, + expectedDomains: nil, + }, + { + desc: "root domain with wildcard in ACME certificates", + domains: []string{"traefik.wtf", "foo.traefik.wtf"}, + acmeCertificates: []*Certificate{ + { + Domain: types.Domain{Main: "*.traefik.wtf"}, + }, + }, + expectedDomains: []string{"traefik.wtf"}, + }, + { + desc: "all domains already managed by ACME", + domains: []string{"traefik.wtf", "foo.traefik.wtf"}, + resolvingDomains: map[string]struct{}{ + "traefik.wtf": {}, + "foo.traefik.wtf": {}, + }, + expectedDomains: []string{}, + }, + { + desc: "one domain already managed by ACME", + domains: []string{"traefik.wtf", "foo.traefik.wtf"}, + resolvingDomains: map[string]struct{}{ + "traefik.wtf": {}, + }, + expectedDomains: []string{"foo.traefik.wtf"}, + }, + { + desc: "wildcard domain already managed by ACME checks the domains", + domains: []string{"bar.traefik.wtf", "foo.traefik.wtf"}, + resolvingDomains: map[string]struct{}{ + "*.traefik.wtf": {}, + }, + expectedDomains: []string{}, + }, + { + desc: "wildcard domain already managed by ACME checks domains and another domain checks 
one other domain, one domain still unchecked", + domains: []string{"traefik.wtf", "bar.traefik.wtf", "foo.traefik.wtf", "acme.wtf"}, + resolvingDomains: map[string]struct{}{ + "*.traefik.wtf": {}, + "traefik.wtf": {}, + }, + expectedDomains: []string{"acme.wtf"}, + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + if test.resolvingDomains == nil { + test.resolvingDomains = make(map[string]struct{}) + } + + acmeProvider := Provider{ + certificateStore: &traefiktls.CertificateStore{ + DynamicCerts: test.dynamicCerts, + StaticCerts: test.staticCerts, + }, + certificates: test.acmeCertificates, + resolvingDomains: test.resolvingDomains, + } + + domains := acmeProvider.getUncheckedDomains(test.domains, false) + assert.Equal(t, len(test.expectedDomains), len(domains), "Unexpected domains.") + }) + } +} + +func TestGetValidDomain(t *testing.T) { + testCases := []struct { + desc string + domains types.Domain + wildcardAllowed bool + dnsChallenge *DNSChallenge + expectedErr string + expectedDomains []string + }{ + { + desc: "valid wildcard", + domains: types.Domain{Main: "*.traefik.wtf"}, + dnsChallenge: &DNSChallenge{}, + wildcardAllowed: true, + expectedErr: "", + expectedDomains: []string{"*.traefik.wtf"}, + }, + { + desc: "no wildcard", + domains: types.Domain{Main: "traefik.wtf", SANs: []string{"foo.traefik.wtf"}}, + dnsChallenge: &DNSChallenge{}, + expectedErr: "", + wildcardAllowed: true, + expectedDomains: []string{"traefik.wtf", "foo.traefik.wtf"}, + }, + { + desc: "unauthorized wildcard", + domains: types.Domain{Main: "*.traefik.wtf"}, + dnsChallenge: &DNSChallenge{}, + wildcardAllowed: false, + expectedErr: "unable to generate a wildcard certificate in ACME provider for domain \"*.traefik.wtf\" from a 'Host' rule", + expectedDomains: nil, + }, + { + desc: "no domain", + domains: types.Domain{}, + dnsChallenge: nil, + wildcardAllowed: true, + expectedErr: "unable to generate a certificate in ACME provider when no domain is given", + expectedDomains: nil, + }, + { + desc: "no DNSChallenge", + domains: types.Domain{Main: "*.traefik.wtf", SANs: []string{"foo.traefik.wtf"}}, + dnsChallenge: nil, + wildcardAllowed: true, + expectedErr: "unable to generate a wildcard certificate in ACME provider for domain \"*.traefik.wtf,foo.traefik.wtf\" : ACME needs a DNSChallenge", + expectedDomains: nil, + }, + { + desc: "unauthorized wildcard with SAN", + domains: types.Domain{Main: "*.*.traefik.wtf", SANs: []string{"foo.traefik.wtf"}}, + dnsChallenge: &DNSChallenge{}, + wildcardAllowed: true, + expectedErr: "unable to generate a wildcard certificate in ACME provider for domain \"*.*.traefik.wtf,foo.traefik.wtf\" : ACME does not allow '*.*' wildcard domain", + expectedDomains: nil, + }, + { + desc: "wildcard and SANs", + domains: types.Domain{Main: "*.traefik.wtf", SANs: []string{"traefik.wtf"}}, + dnsChallenge: &DNSChallenge{}, + wildcardAllowed: true, + expectedErr: "", + expectedDomains: []string{"*.traefik.wtf", "traefik.wtf"}, + }, + { + desc: "unexpected SANs", + domains: types.Domain{Main: "*.traefik.wtf", SANs: []string{"*.acme.wtf"}}, + dnsChallenge: &DNSChallenge{}, + wildcardAllowed: true, + expectedErr: "unable to generate a certificate in ACME provider for domains \"*.traefik.wtf,*.acme.wtf\": SAN \"*.acme.wtf\" can not be a wildcard domain", + expectedDomains: nil, + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + acmeProvider := Provider{Configuration: 
&Configuration{DNSChallenge: test.dnsChallenge}} + + domains, err := acmeProvider.getValidDomains(test.domains, test.wildcardAllowed) + + if len(test.expectedErr) > 0 { + assert.EqualError(t, err, test.expectedErr, "Unexpected error.") + } else { + assert.Equal(t, len(test.expectedDomains), len(domains), "Unexpected domains.") + } + }) + } +} + +func TestDeleteUnnecessaryDomains(t *testing.T) { + testCases := []struct { + desc string + domains []types.Domain + expectedDomains []types.Domain + }{ + { + desc: "no domain to delete", + domains: []types.Domain{ + { + Main: "acme.wtf", + SANs: []string{"traefik.acme.wtf", "foo.bar"}, + }, + { + Main: "*.foo.acme.wtf", + }, + { + Main: "acme02.wtf", + SANs: []string{"traefik.acme02.wtf", "bar.foo"}, + }, + }, + expectedDomains: []types.Domain{ + { + Main: "acme.wtf", + SANs: []string{"traefik.acme.wtf", "foo.bar"}, + }, + { + Main: "*.foo.acme.wtf", + SANs: []string{}, + }, + { + Main: "acme02.wtf", + SANs: []string{"traefik.acme02.wtf", "bar.foo"}, + }, + }, + }, + { + desc: "wildcard and root domain", + domains: []types.Domain{ + { + Main: "acme.wtf", + }, + { + Main: "*.acme.wtf", + SANs: []string{"acme.wtf"}, + }, + }, + expectedDomains: []types.Domain{ + { + Main: "acme.wtf", + SANs: []string{}, + }, + { + Main: "*.acme.wtf", + SANs: []string{}, + }, + }, + }, + { + desc: "2 equals domains", + domains: []types.Domain{ + { + Main: "acme.wtf", + SANs: []string{"traefik.acme.wtf", "foo.bar"}, + }, + { + Main: "acme.wtf", + SANs: []string{"traefik.acme.wtf", "foo.bar"}, + }, + }, + expectedDomains: []types.Domain{ + { + Main: "acme.wtf", + SANs: []string{"traefik.acme.wtf", "foo.bar"}, + }, + }, + }, + { + desc: "2 domains with same values", + domains: []types.Domain{ + { + Main: "acme.wtf", + SANs: []string{"traefik.acme.wtf"}, + }, + { + Main: "acme.wtf", + SANs: []string{"traefik.acme.wtf", "foo.bar"}, + }, + }, + expectedDomains: []types.Domain{ + { + Main: "acme.wtf", + SANs: []string{"traefik.acme.wtf"}, + }, + { + Main: "foo.bar", + SANs: []string{}, + }, + }, + }, + { + desc: "domain totally checked by wildcard", + domains: []types.Domain{ + { + Main: "who.acme.wtf", + SANs: []string{"traefik.acme.wtf", "bar.acme.wtf"}, + }, + { + Main: "*.acme.wtf", + }, + }, + expectedDomains: []types.Domain{ + { + Main: "*.acme.wtf", + SANs: []string{}, + }, + }, + }, + { + desc: "duplicated wildcard", + domains: []types.Domain{ + { + Main: "*.acme.wtf", + SANs: []string{"acme.wtf"}, + }, + { + Main: "*.acme.wtf", + }, + }, + expectedDomains: []types.Domain{ + { + Main: "*.acme.wtf", + SANs: []string{"acme.wtf"}, + }, + }, + }, + { + desc: "domain partially checked by wildcard", + domains: []types.Domain{ + { + Main: "traefik.acme.wtf", + SANs: []string{"acme.wtf", "foo.bar"}, + }, + { + Main: "*.acme.wtf", + }, + { + Main: "who.acme.wtf", + SANs: []string{"traefik.acme.wtf", "bar.acme.wtf"}, + }, + }, + expectedDomains: []types.Domain{ + { + Main: "acme.wtf", + SANs: []string{"foo.bar"}, + }, + { + Main: "*.acme.wtf", + SANs: []string{}, + }, + }, + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + acmeProvider := Provider{Configuration: &Configuration{Domains: test.domains}} + + acmeProvider.deleteUnnecessaryDomains() + assert.Equal(t, test.expectedDomains, acmeProvider.Domains, "unexpected domain") + }) + } +} + +func TestIsAccountMatchingCaServer(t *testing.T) { + testCases := []struct { + desc string + accountURI string + serverURI string + expected bool + }{ + { + desc: "acme 
staging with matching account", + accountURI: "https://acme-staging-v02.api.letsencrypt.org/acme/acct/1234567", + serverURI: "https://acme-staging-v02.api.letsencrypt.org/acme/directory", + expected: true, + }, + { + desc: "acme production with matching account", + accountURI: "https://acme-v02.api.letsencrypt.org/acme/acct/1234567", + serverURI: "https://acme-v02.api.letsencrypt.org/acme/directory", + expected: true, + }, + { + desc: "http only acme with matching account", + accountURI: "http://acme.api.letsencrypt.org/acme/acct/1234567", + serverURI: "http://acme.api.letsencrypt.org/acme/directory", + expected: true, + }, + { + desc: "different subdomains for account and server", + accountURI: "https://test1.example.org/acme/acct/1234567", + serverURI: "https://test2.example.org/acme/directory", + expected: false, + }, + { + desc: "different domains for account and server", + accountURI: "https://test.example1.org/acme/acct/1234567", + serverURI: "https://test.example2.org/acme/directory", + expected: false, + }, + { + desc: "different tld for account and server", + accountURI: "https://test.example.com/acme/acct/1234567", + serverURI: "https://test.example.org/acme/directory", + expected: false, + }, + { + desc: "malformed account url", + accountURI: "//|\\/test.example.com/acme/acct/1234567", + serverURI: "https://test.example.com/acme/directory", + expected: false, + }, + { + desc: "malformed server url", + accountURI: "https://test.example.com/acme/acct/1234567", + serverURI: "//|\\/test.example.com/acme/directory", + expected: false, + }, + { + desc: "malformed server and account url", + accountURI: "//|\\/test.example.com/acme/acct/1234567", + serverURI: "//|\\/test.example.com/acme/directory", + expected: false, + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + result := isAccountMatchingCaServer(test.accountURI, test.serverURI) + + assert.Equal(t, test.expected, result) + }) + } +} + +func TestUseBackOffToObtainCertificate(t *testing.T) { + testCases := []struct { + desc string + domains []string + dnsChallenge *DNSChallenge + expectedResponse bool + }{ + { + desc: "only one single domain", + domains: []string{"acme.wtf"}, + dnsChallenge: &DNSChallenge{}, + expectedResponse: false, + }, + { + desc: "only one wildcard domain", + domains: []string{"*.acme.wtf"}, + dnsChallenge: &DNSChallenge{}, + expectedResponse: false, + }, + { + desc: "wildcard domain with no root domain", + domains: []string{"*.acme.wtf", "foo.acme.wtf", "bar.acme.wtf", "foo.bar"}, + dnsChallenge: &DNSChallenge{}, + expectedResponse: false, + }, + { + desc: "wildcard and root domain", + domains: []string{"*.acme.wtf", "foo.acme.wtf", "bar.acme.wtf", "acme.wtf"}, + dnsChallenge: &DNSChallenge{}, + expectedResponse: true, + }, + { + desc: "wildcard and root domain but no DNS challenge", + domains: []string{"*.acme.wtf", "acme.wtf"}, + dnsChallenge: nil, + expectedResponse: false, + }, + { + desc: "two wildcard domains (must never happen)", + domains: []string{"*.acme.wtf", "*.bar.foo"}, + dnsChallenge: nil, + expectedResponse: false, + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + acmeProvider := Provider{Configuration: &Configuration{DNSChallenge: test.dnsChallenge}} + + actualResponse := acmeProvider.useCertificateWithRetry(test.domains) + assert.Equal(t, test.expectedResponse, actualResponse, "unexpected response to use backOff") + }) + } +} + +func TestInitAccount(t 
*testing.T) { + testCases := []struct { + desc string + account *Account + email string + keyType string + expectedAccount *Account + }{ + { + desc: "Existing account with all information", + account: &Account{ + Email: "foo@foo.net", + KeyType: acme.EC256, + }, + expectedAccount: &Account{ + Email: "foo@foo.net", + KeyType: acme.EC256, + }, + }, + { + desc: "Account nil", + email: "foo@foo.net", + keyType: "EC256", + expectedAccount: &Account{ + Email: "foo@foo.net", + KeyType: acme.EC256, + }, + }, + { + desc: "Existing account with no email", + account: &Account{ + KeyType: acme.RSA4096, + }, + email: "foo@foo.net", + keyType: "EC256", + expectedAccount: &Account{ + Email: "foo@foo.net", + KeyType: acme.EC256, + }, + }, + { + desc: "Existing account with no key type", + account: &Account{ + Email: "foo@foo.net", + }, + email: "bar@foo.net", + keyType: "EC256", + expectedAccount: &Account{ + Email: "foo@foo.net", + KeyType: acme.EC256, + }, + }, + { + desc: "Existing account and provider with no key type", + account: &Account{ + Email: "foo@foo.net", + }, + email: "bar@foo.net", + expectedAccount: &Account{ + Email: "foo@foo.net", + KeyType: acme.RSA4096, + }, + }, + } + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + acmeProvider := Provider{account: test.account, Configuration: &Configuration{Email: test.email, KeyType: test.keyType}} + + actualAccount, err := acmeProvider.initAccount() + assert.Nil(t, err, "Init account in error") + assert.Equal(t, test.expectedAccount.Email, actualAccount.Email, "unexpected email account") + assert.Equal(t, test.expectedAccount.KeyType, actualAccount.KeyType, "unexpected keyType account") + }) + } +} diff --git a/old/provider/acme/store.go b/old/provider/acme/store.go new file mode 100644 index 000000000..7573d1635 --- /dev/null +++ b/old/provider/acme/store.go @@ -0,0 +1,25 @@ +package acme + +// StoredData represents the data managed by the Store +type StoredData struct { + Account *Account + Certificates []*Certificate + HTTPChallenges map[string]map[string][]byte + TLSChallenges map[string]*Certificate +} + +// Store is a generic interface to represents a storage +type Store interface { + GetAccount() (*Account, error) + SaveAccount(*Account) error + GetCertificates() ([]*Certificate, error) + SaveCertificates([]*Certificate) error + + GetHTTPChallengeToken(token, domain string) ([]byte, error) + SetHTTPChallengeToken(token, domain string, keyAuth []byte) error + RemoveHTTPChallengeToken(token, domain string) error + + AddTLSChallenge(domain string, cert *Certificate) error + GetTLSChallenge(domain string) (*Certificate, error) + RemoveTLSChallenge(domain string) error +} diff --git a/provider/boltdb/boltdb.go b/old/provider/boltdb/boltdb.go similarity index 88% rename from provider/boltdb/boltdb.go rename to old/provider/boltdb/boltdb.go index df6a0cb19..d190264a2 100644 --- a/provider/boltdb/boltdb.go +++ b/old/provider/boltdb/boltdb.go @@ -5,10 +5,10 @@ import ( "github.com/abronan/valkeyrie/store" "github.com/abronan/valkeyrie/store/boltdb" - "github.com/containous/traefik/provider" - "github.com/containous/traefik/provider/kv" + "github.com/containous/traefik/old/provider" + "github.com/containous/traefik/old/provider/kv" + "github.com/containous/traefik/old/types" "github.com/containous/traefik/safe" - "github.com/containous/traefik/types" ) var _ provider.Provider = (*Provider)(nil) diff --git a/provider/consul/consul.go b/old/provider/consul/consul.go similarity index 88% rename 
from provider/consul/consul.go rename to old/provider/consul/consul.go index 3004e1b5d..1e25d2994 100644 --- a/provider/consul/consul.go +++ b/old/provider/consul/consul.go @@ -5,10 +5,10 @@ import ( "github.com/abronan/valkeyrie/store" "github.com/abronan/valkeyrie/store/consul" - "github.com/containous/traefik/provider" - "github.com/containous/traefik/provider/kv" + "github.com/containous/traefik/old/provider" + "github.com/containous/traefik/old/provider/kv" + "github.com/containous/traefik/old/types" "github.com/containous/traefik/safe" - "github.com/containous/traefik/types" ) var _ provider.Provider = (*Provider)(nil) diff --git a/provider/consulcatalog/config.go b/old/provider/consulcatalog/config.go similarity index 97% rename from provider/consulcatalog/config.go rename to old/provider/consulcatalog/config.go index 74a8afd3f..e6eefac96 100644 --- a/provider/consulcatalog/config.go +++ b/old/provider/consulcatalog/config.go @@ -11,10 +11,10 @@ import ( "strings" "text/template" - "github.com/containous/traefik/log" - "github.com/containous/traefik/provider" - "github.com/containous/traefik/provider/label" - "github.com/containous/traefik/types" + "github.com/containous/traefik/old/log" + "github.com/containous/traefik/old/provider" + "github.com/containous/traefik/old/provider/label" + "github.com/containous/traefik/old/types" "github.com/hashicorp/consul/api" ) diff --git a/provider/consulcatalog/config_test.go b/old/provider/consulcatalog/config_test.go similarity index 99% rename from provider/consulcatalog/config_test.go rename to old/provider/consulcatalog/config_test.go index e5659e018..64b2f7acb 100644 --- a/provider/consulcatalog/config_test.go +++ b/old/provider/consulcatalog/config_test.go @@ -6,8 +6,8 @@ import ( "time" "github.com/containous/flaeg/parse" - "github.com/containous/traefik/provider/label" - "github.com/containous/traefik/types" + "github.com/containous/traefik/old/provider/label" + "github.com/containous/traefik/old/types" "github.com/hashicorp/consul/api" "github.com/stretchr/testify/assert" ) diff --git a/provider/consulcatalog/consul_catalog.go b/old/provider/consulcatalog/consul_catalog.go similarity index 99% rename from provider/consulcatalog/consul_catalog.go rename to old/provider/consulcatalog/consul_catalog.go index 297096206..2a9943ccf 100644 --- a/provider/consulcatalog/consul_catalog.go +++ b/old/provider/consulcatalog/consul_catalog.go @@ -11,11 +11,11 @@ import ( "github.com/BurntSushi/ty/fun" "github.com/cenk/backoff" "github.com/containous/traefik/job" - "github.com/containous/traefik/log" - "github.com/containous/traefik/provider" - "github.com/containous/traefik/provider/label" + "github.com/containous/traefik/old/log" + "github.com/containous/traefik/old/provider" + "github.com/containous/traefik/old/provider/label" + "github.com/containous/traefik/old/types" "github.com/containous/traefik/safe" - "github.com/containous/traefik/types" "github.com/hashicorp/consul/api" ) diff --git a/provider/consulcatalog/consul_catalog_test.go b/old/provider/consulcatalog/consul_catalog_test.go similarity index 100% rename from provider/consulcatalog/consul_catalog_test.go rename to old/provider/consulcatalog/consul_catalog_test.go diff --git a/provider/consulcatalog/convert_types.go b/old/provider/consulcatalog/convert_types.go similarity index 91% rename from provider/consulcatalog/convert_types.go rename to old/provider/consulcatalog/convert_types.go index edb320087..5bce57cb8 100644 --- a/provider/consulcatalog/convert_types.go +++ 
b/old/provider/consulcatalog/convert_types.go @@ -3,7 +3,7 @@ package consulcatalog import ( "strings" - "github.com/containous/traefik/provider/label" + "github.com/containous/traefik/old/provider/label" ) func tagsToNeutralLabels(tags []string, prefix string) map[string]string { diff --git a/provider/consulcatalog/convert_types_test.go b/old/provider/consulcatalog/convert_types_test.go similarity index 100% rename from provider/consulcatalog/convert_types_test.go rename to old/provider/consulcatalog/convert_types_test.go diff --git a/provider/docker/builder_test.go b/old/provider/docker/builder_test.go similarity index 100% rename from provider/docker/builder_test.go rename to old/provider/docker/builder_test.go diff --git a/provider/docker/config.go b/old/provider/docker/config.go similarity index 98% rename from provider/docker/config.go rename to old/provider/docker/config.go index 869e96ad3..6d0292e60 100644 --- a/provider/docker/config.go +++ b/old/provider/docker/config.go @@ -11,10 +11,10 @@ import ( "text/template" "github.com/BurntSushi/ty/fun" - "github.com/containous/traefik/log" - "github.com/containous/traefik/provider" - "github.com/containous/traefik/provider/label" - "github.com/containous/traefik/types" + "github.com/containous/traefik/old/log" + "github.com/containous/traefik/old/provider" + "github.com/containous/traefik/old/provider/label" + "github.com/containous/traefik/old/types" "github.com/docker/go-connections/nat" ) diff --git a/provider/docker/config_container_docker_test.go b/old/provider/docker/config_container_docker_test.go similarity index 99% rename from provider/docker/config_container_docker_test.go rename to old/provider/docker/config_container_docker_test.go index e417f0bc7..6989dd8a8 100644 --- a/provider/docker/config_container_docker_test.go +++ b/old/provider/docker/config_container_docker_test.go @@ -6,8 +6,8 @@ import ( "time" "github.com/containous/flaeg/parse" - "github.com/containous/traefik/provider/label" - "github.com/containous/traefik/types" + "github.com/containous/traefik/old/provider/label" + "github.com/containous/traefik/old/types" docker "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" "github.com/docker/go-connections/nat" diff --git a/provider/docker/config_container_swarm_test.go b/old/provider/docker/config_container_swarm_test.go similarity index 99% rename from provider/docker/config_container_swarm_test.go rename to old/provider/docker/config_container_swarm_test.go index 96b9cf8c4..4a83ad902 100644 --- a/provider/docker/config_container_swarm_test.go +++ b/old/provider/docker/config_container_swarm_test.go @@ -6,8 +6,8 @@ import ( "time" "github.com/containous/flaeg/parse" - "github.com/containous/traefik/provider/label" - "github.com/containous/traefik/types" + "github.com/containous/traefik/old/provider/label" + "github.com/containous/traefik/old/types" docker "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/swarm" "github.com/stretchr/testify/assert" diff --git a/provider/docker/config_segment_test.go b/old/provider/docker/config_segment_test.go similarity index 99% rename from provider/docker/config_segment_test.go rename to old/provider/docker/config_segment_test.go index 810e0e6a1..24e49f2f9 100644 --- a/provider/docker/config_segment_test.go +++ b/old/provider/docker/config_segment_test.go @@ -5,8 +5,8 @@ import ( "time" "github.com/containous/flaeg/parse" - "github.com/containous/traefik/provider/label" - "github.com/containous/traefik/types" + 
"github.com/containous/traefik/old/provider/label" + "github.com/containous/traefik/old/types" docker "github.com/docker/docker/api/types" "github.com/docker/go-connections/nat" "github.com/stretchr/testify/assert" diff --git a/provider/docker/docker.go b/old/provider/docker/docker.go similarity index 99% rename from provider/docker/docker.go rename to old/provider/docker/docker.go index d3fa64805..0997cdb50 100644 --- a/provider/docker/docker.go +++ b/old/provider/docker/docker.go @@ -11,10 +11,10 @@ import ( "github.com/cenk/backoff" "github.com/containous/traefik/job" - "github.com/containous/traefik/log" - "github.com/containous/traefik/provider" + "github.com/containous/traefik/old/log" + "github.com/containous/traefik/old/provider" + "github.com/containous/traefik/old/types" "github.com/containous/traefik/safe" - "github.com/containous/traefik/types" "github.com/containous/traefik/version" dockertypes "github.com/docker/docker/api/types" dockercontainertypes "github.com/docker/docker/api/types/container" diff --git a/provider/docker/docker_unix.go b/old/provider/docker/docker_unix.go similarity index 100% rename from provider/docker/docker_unix.go rename to old/provider/docker/docker_unix.go diff --git a/provider/docker/docker_windows.go b/old/provider/docker/docker_windows.go similarity index 100% rename from provider/docker/docker_windows.go rename to old/provider/docker/docker_windows.go diff --git a/provider/docker/swarm_test.go b/old/provider/docker/swarm_test.go similarity index 100% rename from provider/docker/swarm_test.go rename to old/provider/docker/swarm_test.go diff --git a/provider/dynamodb/dynamodb.go b/old/provider/dynamodb/dynamodb.go similarity index 98% rename from provider/dynamodb/dynamodb.go rename to old/provider/dynamodb/dynamodb.go index 2de67e35d..f280994da 100644 --- a/provider/dynamodb/dynamodb.go +++ b/old/provider/dynamodb/dynamodb.go @@ -14,10 +14,10 @@ import ( "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface" "github.com/cenk/backoff" "github.com/containous/traefik/job" - "github.com/containous/traefik/log" - "github.com/containous/traefik/provider" + "github.com/containous/traefik/old/log" + "github.com/containous/traefik/old/provider" + "github.com/containous/traefik/old/types" "github.com/containous/traefik/safe" - "github.com/containous/traefik/types" ) var _ provider.Provider = (*Provider)(nil) diff --git a/provider/dynamodb/dynamodb_test.go b/old/provider/dynamodb/dynamodb_test.go similarity index 98% rename from provider/dynamodb/dynamodb_test.go rename to old/provider/dynamodb/dynamodb_test.go index 88878f7ef..2c025e668 100644 --- a/provider/dynamodb/dynamodb_test.go +++ b/old/provider/dynamodb/dynamodb_test.go @@ -9,7 +9,7 @@ import ( "github.com/aws/aws-sdk-go/service/dynamodb" "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute" "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface" - "github.com/containous/traefik/types" + "github.com/containous/traefik/old/types" ) type mockDynamoDBClient struct { diff --git a/provider/ecs/builder_test.go b/old/provider/ecs/builder_test.go similarity index 100% rename from provider/ecs/builder_test.go rename to old/provider/ecs/builder_test.go diff --git a/provider/ecs/cluster.go b/old/provider/ecs/cluster.go similarity index 100% rename from provider/ecs/cluster.go rename to old/provider/ecs/cluster.go diff --git a/provider/ecs/cluster_test.go b/old/provider/ecs/cluster_test.go similarity index 100% rename from provider/ecs/cluster_test.go rename to old/provider/ecs/cluster_test.go 
diff --git a/provider/ecs/config.go b/old/provider/ecs/config.go similarity index 97% rename from provider/ecs/config.go rename to old/provider/ecs/config.go index a225beb0f..535ae16d2 100644 --- a/provider/ecs/config.go +++ b/old/provider/ecs/config.go @@ -11,10 +11,10 @@ import ( "github.com/BurntSushi/ty/fun" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/containous/traefik/log" - "github.com/containous/traefik/provider" - "github.com/containous/traefik/provider/label" - "github.com/containous/traefik/types" + "github.com/containous/traefik/old/log" + "github.com/containous/traefik/old/provider" + "github.com/containous/traefik/old/provider/label" + "github.com/containous/traefik/old/types" ) // buildConfiguration fills the config template with the given instances diff --git a/provider/ecs/config_segment_test.go b/old/provider/ecs/config_segment_test.go similarity index 99% rename from provider/ecs/config_segment_test.go rename to old/provider/ecs/config_segment_test.go index 80246ec3c..3e47b3bc4 100644 --- a/provider/ecs/config_segment_test.go +++ b/old/provider/ecs/config_segment_test.go @@ -6,8 +6,8 @@ import ( "github.com/aws/aws-sdk-go/service/ec2" "github.com/containous/flaeg/parse" - "github.com/containous/traefik/provider/label" - "github.com/containous/traefik/types" + "github.com/containous/traefik/old/provider/label" + "github.com/containous/traefik/old/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/provider/ecs/config_test.go b/old/provider/ecs/config_test.go similarity index 99% rename from provider/ecs/config_test.go rename to old/provider/ecs/config_test.go index bed7c9ac4..1e708fd57 100644 --- a/provider/ecs/config_test.go +++ b/old/provider/ecs/config_test.go @@ -8,8 +8,8 @@ import ( "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/ecs" "github.com/containous/flaeg/parse" - "github.com/containous/traefik/provider/label" - "github.com/containous/traefik/types" + "github.com/containous/traefik/old/provider/label" + "github.com/containous/traefik/old/types" "github.com/stretchr/testify/assert" ) diff --git a/provider/ecs/ecs.go b/old/provider/ecs/ecs.go similarity index 98% rename from provider/ecs/ecs.go rename to old/provider/ecs/ecs.go index c34e5a76a..6b9d4e7eb 100644 --- a/provider/ecs/ecs.go +++ b/old/provider/ecs/ecs.go @@ -15,10 +15,10 @@ import ( "github.com/aws/aws-sdk-go/service/ecs" "github.com/cenk/backoff" "github.com/containous/traefik/job" - "github.com/containous/traefik/log" - "github.com/containous/traefik/provider" + "github.com/containous/traefik/old/log" + "github.com/containous/traefik/old/provider" + "github.com/containous/traefik/old/types" "github.com/containous/traefik/safe" - "github.com/containous/traefik/types" ) var _ provider.Provider = (*Provider)(nil) diff --git a/provider/ecs/ecs_test.go b/old/provider/ecs/ecs_test.go similarity index 100% rename from provider/ecs/ecs_test.go rename to old/provider/ecs/ecs_test.go diff --git a/provider/etcd/etcd.go b/old/provider/etcd/etcd.go similarity index 88% rename from provider/etcd/etcd.go rename to old/provider/etcd/etcd.go index 132346caa..0c0859f5b 100644 --- a/provider/etcd/etcd.go +++ b/old/provider/etcd/etcd.go @@ -5,10 +5,10 @@ import ( "github.com/abronan/valkeyrie/store" "github.com/abronan/valkeyrie/store/etcd/v3" - "github.com/containous/traefik/provider" - "github.com/containous/traefik/provider/kv" + "github.com/containous/traefik/old/provider" + "github.com/containous/traefik/old/provider/kv" + 
"github.com/containous/traefik/old/types" "github.com/containous/traefik/safe" - "github.com/containous/traefik/types" ) var _ provider.Provider = (*Provider)(nil) diff --git a/provider/eureka/config.go b/old/provider/eureka/config.go similarity index 88% rename from provider/eureka/config.go rename to old/provider/eureka/config.go index f5bfa99e4..7f1a71494 100644 --- a/provider/eureka/config.go +++ b/old/provider/eureka/config.go @@ -5,10 +5,10 @@ import ( "text/template" "github.com/ArthurHlt/go-eureka-client/eureka" - "github.com/containous/traefik/log" - "github.com/containous/traefik/provider" - "github.com/containous/traefik/provider/label" - "github.com/containous/traefik/types" + "github.com/containous/traefik/old/log" + "github.com/containous/traefik/old/provider" + "github.com/containous/traefik/old/provider/label" + "github.com/containous/traefik/old/types" ) // Build the configuration from Provider server diff --git a/provider/eureka/config_test.go b/old/provider/eureka/config_test.go similarity index 98% rename from provider/eureka/config_test.go rename to old/provider/eureka/config_test.go index 222c7898d..101b4ad17 100644 --- a/provider/eureka/config_test.go +++ b/old/provider/eureka/config_test.go @@ -5,7 +5,7 @@ import ( "testing" "github.com/ArthurHlt/go-eureka-client/eureka" - "github.com/containous/traefik/provider/label" + "github.com/containous/traefik/old/provider/label" "github.com/stretchr/testify/assert" ) diff --git a/provider/eureka/eureka.go b/old/provider/eureka/eureka.go similarity index 95% rename from provider/eureka/eureka.go rename to old/provider/eureka/eureka.go index a67f1df65..1d6b25a44 100644 --- a/provider/eureka/eureka.go +++ b/old/provider/eureka/eureka.go @@ -8,10 +8,10 @@ import ( "github.com/cenk/backoff" "github.com/containous/flaeg/parse" "github.com/containous/traefik/job" - "github.com/containous/traefik/log" - "github.com/containous/traefik/provider" + "github.com/containous/traefik/old/log" + "github.com/containous/traefik/old/provider" + "github.com/containous/traefik/old/types" "github.com/containous/traefik/safe" - "github.com/containous/traefik/types" ) // Provider holds configuration of the Provider provider. diff --git a/old/provider/file/file.go b/old/provider/file/file.go new file mode 100644 index 000000000..48ca72e4b --- /dev/null +++ b/old/provider/file/file.go @@ -0,0 +1,253 @@ +package file + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" + "strings" + "text/template" + + "github.com/containous/traefik/old/log" + "github.com/containous/traefik/old/provider" + "github.com/containous/traefik/old/types" + "github.com/containous/traefik/safe" + "github.com/containous/traefik/tls" + "github.com/pkg/errors" + "gopkg.in/fsnotify.v1" +) + +var _ provider.Provider = (*Provider)(nil) + +// Provider holds configurations of the provider. +type Provider struct { + provider.BaseProvider `mapstructure:",squash" export:"true"` + Directory string `description:"Load configuration from one or more .toml files in a directory" export:"true"` + TraefikFile string +} + +// Init the provider +func (p *Provider) Init(constraints types.Constraints) error { + return p.BaseProvider.Init(constraints) +} + +// Provide allows the file provider to provide configurations to traefik +// using the given configuration channel. 
+func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error { + configuration, err := p.BuildConfiguration() + if err != nil { + return err + } + + if p.Watch { + var watchItem string + + if len(p.Directory) > 0 { + watchItem = p.Directory + } else if len(p.Filename) > 0 { + watchItem = filepath.Dir(p.Filename) + } else { + watchItem = filepath.Dir(p.TraefikFile) + } + + if err := p.addWatcher(pool, watchItem, configurationChan, p.watcherCallback); err != nil { + return err + } + } + + sendConfigToChannel(configurationChan, configuration) + return nil +} + +// BuildConfiguration loads configuration either from file or a directory specified by 'Filename'/'Directory' +// and returns a 'Configuration' object +func (p *Provider) BuildConfiguration() (*types.Configuration, error) { + if len(p.Directory) > 0 { + return p.loadFileConfigFromDirectory(p.Directory, nil) + } + + if len(p.Filename) > 0 { + return p.loadFileConfig(p.Filename, true) + } + + if len(p.TraefikFile) > 0 { + return p.loadFileConfig(p.TraefikFile, false) + } + + return nil, errors.New("error using file configuration backend, no filename defined") +} + +func (p *Provider) addWatcher(pool *safe.Pool, directory string, configurationChan chan<- types.ConfigMessage, callback func(chan<- types.ConfigMessage, fsnotify.Event)) error { + watcher, err := fsnotify.NewWatcher() + if err != nil { + return fmt.Errorf("error creating file watcher: %s", err) + } + + err = watcher.Add(directory) + if err != nil { + return fmt.Errorf("error adding file watcher: %s", err) + } + + // Process events + pool.Go(func(stop chan bool) { + defer watcher.Close() + for { + select { + case <-stop: + return + case evt := <-watcher.Events: + if p.Directory == "" { + var filename string + if len(p.Filename) > 0 { + filename = p.Filename + } else { + filename = p.TraefikFile + } + + _, evtFileName := filepath.Split(evt.Name) + _, confFileName := filepath.Split(filename) + if evtFileName == confFileName { + callback(configurationChan, evt) + } + } else { + callback(configurationChan, evt) + } + case err := <-watcher.Errors: + log.Errorf("Watcher event error: %s", err) + } + } + }) + return nil +} + +func (p *Provider) watcherCallback(configurationChan chan<- types.ConfigMessage, event fsnotify.Event) { + watchItem := p.TraefikFile + if len(p.Directory) > 0 { + watchItem = p.Directory + } else if len(p.Filename) > 0 { + watchItem = p.Filename + } + + if _, err := os.Stat(watchItem); err != nil { + log.Debugf("Unable to watch %s : %v", watchItem, err) + return + } + + configuration, err := p.BuildConfiguration() + + if err != nil { + log.Errorf("Error occurred during watcher callback: %s", err) + return + } + + sendConfigToChannel(configurationChan, configuration) +} + +func sendConfigToChannel(configurationChan chan<- types.ConfigMessage, configuration *types.Configuration) { + configurationChan <- types.ConfigMessage{ + ProviderName: "file", + Configuration: configuration, + } +} + +func readFile(filename string) (string, error) { + if len(filename) > 0 { + buf, err := ioutil.ReadFile(filename) + if err != nil { + return "", err + } + return string(buf), nil + } + return "", fmt.Errorf("invalid filename: %s", filename) +} + +func (p *Provider) loadFileConfig(filename string, parseTemplate bool) (*types.Configuration, error) { + fileContent, err := readFile(filename) + if err != nil { + return nil, fmt.Errorf("error reading configuration file: %s - %s", filename, err) + } + + var configuration *types.Configuration + if 
parseTemplate { + configuration, err = p.CreateConfiguration(fileContent, template.FuncMap{}, false) + } else { + configuration, err = p.DecodeConfiguration(fileContent) + } + + if err != nil { + return nil, err + } + if configuration == nil || configuration.Backends == nil && configuration.Frontends == nil && configuration.TLS == nil { + configuration = &types.Configuration{ + Frontends: make(map[string]*types.Frontend), + Backends: make(map[string]*types.Backend), + } + } + return configuration, err +} + +func (p *Provider) loadFileConfigFromDirectory(directory string, configuration *types.Configuration) (*types.Configuration, error) { + fileList, err := ioutil.ReadDir(directory) + + if err != nil { + return configuration, fmt.Errorf("unable to read directory %s: %v", directory, err) + } + + if configuration == nil { + configuration = &types.Configuration{ + Frontends: make(map[string]*types.Frontend), + Backends: make(map[string]*types.Backend), + } + } + + configTLSMaps := make(map[*tls.Configuration]struct{}) + for _, item := range fileList { + + if item.IsDir() { + configuration, err = p.loadFileConfigFromDirectory(filepath.Join(directory, item.Name()), configuration) + if err != nil { + return configuration, fmt.Errorf("unable to load content configuration from subdirectory %s: %v", item, err) + } + continue + } else if !strings.HasSuffix(item.Name(), ".toml") && !strings.HasSuffix(item.Name(), ".tmpl") { + continue + } + + var c *types.Configuration + c, err = p.loadFileConfig(path.Join(directory, item.Name()), true) + + if err != nil { + return configuration, err + } + + for backendName, backend := range c.Backends { + if _, exists := configuration.Backends[backendName]; exists { + log.Warnf("Backend %s already configured, skipping", backendName) + } else { + configuration.Backends[backendName] = backend + } + } + + for frontendName, frontend := range c.Frontends { + if _, exists := configuration.Frontends[frontendName]; exists { + log.Warnf("Frontend %s already configured, skipping", frontendName) + } else { + configuration.Frontends[frontendName] = frontend + } + } + + for _, conf := range c.TLS { + if _, exists := configTLSMaps[conf]; exists { + log.Warnf("TLS Configuration %v already configured, skipping", conf) + } else { + configTLSMaps[conf] = struct{}{} + } + } + + } + for conf := range configTLSMaps { + configuration.TLS = append(configuration.TLS, conf) + } + return configuration, nil +} diff --git a/old/provider/file/file_test.go b/old/provider/file/file_test.go new file mode 100644 index 000000000..4140afddd --- /dev/null +++ b/old/provider/file/file_test.go @@ -0,0 +1,338 @@ +package file + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "path" + "testing" + "time" + + "github.com/containous/traefik/old/types" + "github.com/containous/traefik/safe" + "github.com/stretchr/testify/assert" +) + +// createRandomFile Helper +func createRandomFile(t *testing.T, tempDir string, contents ...string) *os.File { + return createFile(t, tempDir, fmt.Sprintf("temp%d.toml", time.Now().UnixNano()), contents...) 
+} + +// createFile Helper +func createFile(t *testing.T, tempDir string, name string, contents ...string) *os.File { + t.Helper() + fileName := path.Join(tempDir, name) + + tempFile, err := os.Create(fileName) + if err != nil { + t.Fatal(err) + } + + for _, content := range contents { + _, err := tempFile.WriteString(content) + if err != nil { + t.Fatal(err) + } + } + + err = tempFile.Close() + if err != nil { + t.Fatal(err) + } + + return tempFile +} + +// createTempDir Helper +func createTempDir(t *testing.T, dir string) string { + t.Helper() + d, err := ioutil.TempDir("", dir) + if err != nil { + t.Fatal(err) + } + return d +} + +// createFrontendConfiguration Helper +func createFrontendConfiguration(n int) string { + conf := "[frontends]\n" + for i := 1; i <= n; i++ { + conf += fmt.Sprintf(` [frontends."frontend%[1]d"] + backend = "backend%[1]d" +`, i) + } + return conf +} + +// createBackendConfiguration Helper +func createBackendConfiguration(n int) string { + conf := "[backends]\n" + for i := 1; i <= n; i++ { + conf += fmt.Sprintf(` [backends.backend%[1]d] + [backends.backend%[1]d.servers.server1] + url = "http://172.17.0.%[1]d:80" +`, i) + } + return conf +} + +// createTLS Helper +func createTLS(n int) string { + var conf string + for i := 1; i <= n; i++ { + conf += fmt.Sprintf(`[[TLS]] + EntryPoints = ["https"] + [TLS.Certificate] + CertFile = "integration/fixtures/https/snitest%[1]d.com.cert" + KeyFile = "integration/fixtures/https/snitest%[1]d.com.key" +`, i) + } + return conf +} + +type ProvideTestCase struct { + desc string + directoryContent []string + fileContent string + traefikFileContent string + expectedNumFrontend int + expectedNumBackend int + expectedNumTLSConf int +} + +func getTestCases() []ProvideTestCase { + return []ProvideTestCase{ + { + desc: "simple file", + fileContent: createFrontendConfiguration(2) + createBackendConfiguration(3) + createTLS(4), + expectedNumFrontend: 2, + expectedNumBackend: 3, + expectedNumTLSConf: 4, + }, + { + desc: "simple file and a traefik file", + fileContent: createFrontendConfiguration(2) + createBackendConfiguration(3) + createTLS(4), + traefikFileContent: ` + debug=true +`, + expectedNumFrontend: 2, + expectedNumBackend: 3, + expectedNumTLSConf: 4, + }, + { + desc: "template file", + fileContent: ` +[frontends] +{{ range $i, $e := until 20 }} + [frontends.frontend{{ $e }}] + backend = "backend" +{{ end }} +`, + expectedNumFrontend: 20, + }, + { + desc: "simple directory", + directoryContent: []string{ + createFrontendConfiguration(2), + createBackendConfiguration(3), + createTLS(4), + }, + expectedNumFrontend: 2, + expectedNumBackend: 3, + expectedNumTLSConf: 4, + }, + { + desc: "template in directory", + directoryContent: []string{ + ` +[frontends] +{{ range $i, $e := until 20 }} + [frontends.frontend{{ $e }}] + backend = "backend" +{{ end }} +`, + ` +[backends] +{{ range $i, $e := until 20 }} + [backends.backend{{ $e }}] + [backends.backend{{ $e }}.servers.server1] + url="http://127.0.0.1" +{{ end }} +`, + }, + expectedNumFrontend: 20, + expectedNumBackend: 20, + }, + { + desc: "simple traefik file", + traefikFileContent: ` + debug=true + [file] + ` + createFrontendConfiguration(2) + createBackendConfiguration(3) + createTLS(4), + expectedNumFrontend: 2, + expectedNumBackend: 3, + expectedNumTLSConf: 4, + }, + { + desc: "simple traefik file with templating", + traefikFileContent: ` + temp="{{ getTag \"test\" }}" + [file] + ` + createFrontendConfiguration(2) + createBackendConfiguration(3) + createTLS(4), + 
expectedNumFrontend: 2, + expectedNumBackend: 3, + expectedNumTLSConf: 4, + }, + } +} + +func TestProvideWithoutWatch(t *testing.T) { + for _, test := range getTestCases() { + test := test + t.Run(test.desc+" without watch", func(t *testing.T) { + t.Parallel() + + provider, clean := createProvider(t, test, false) + defer clean() + configChan := make(chan types.ConfigMessage) + + go func() { + err := provider.Provide(configChan, safe.NewPool(context.Background())) + assert.NoError(t, err) + }() + + timeout := time.After(time.Second) + select { + case config := <-configChan: + assert.Len(t, config.Configuration.Backends, test.expectedNumBackend) + assert.Len(t, config.Configuration.Frontends, test.expectedNumFrontend) + assert.Len(t, config.Configuration.TLS, test.expectedNumTLSConf) + case <-timeout: + t.Errorf("timeout while waiting for config") + } + }) + } +} + +func TestProvideWithWatch(t *testing.T) { + for _, test := range getTestCases() { + test := test + t.Run(test.desc+" with watch", func(t *testing.T) { + t.Parallel() + + provider, clean := createProvider(t, test, true) + defer clean() + configChan := make(chan types.ConfigMessage) + + go func() { + err := provider.Provide(configChan, safe.NewPool(context.Background())) + assert.NoError(t, err) + }() + + timeout := time.After(time.Second) + select { + case config := <-configChan: + assert.Len(t, config.Configuration.Backends, 0) + assert.Len(t, config.Configuration.Frontends, 0) + assert.Len(t, config.Configuration.TLS, 0) + case <-timeout: + t.Errorf("timeout while waiting for config") + } + + if len(test.fileContent) > 0 { + if err := ioutil.WriteFile(provider.Filename, []byte(test.fileContent), 0755); err != nil { + t.Error(err) + } + } + + if len(test.traefikFileContent) > 0 { + if err := ioutil.WriteFile(provider.TraefikFile, []byte(test.traefikFileContent), 0755); err != nil { + t.Error(err) + } + } + + if len(test.directoryContent) > 0 { + for _, fileContent := range test.directoryContent { + createRandomFile(t, provider.Directory, fileContent) + } + } + + timeout = time.After(time.Second * 1) + var numUpdates, numBackends, numFrontends, numTLSConfs int + for { + select { + case config := <-configChan: + numUpdates++ + numBackends = len(config.Configuration.Backends) + numFrontends = len(config.Configuration.Frontends) + numTLSConfs = len(config.Configuration.TLS) + t.Logf("received update #%d: backends %d/%d, frontends %d/%d, TLS configs %d/%d", numUpdates, numBackends, test.expectedNumBackend, numFrontends, test.expectedNumFrontend, numTLSConfs, test.expectedNumTLSConf) + + if numBackends == test.expectedNumBackend && numFrontends == test.expectedNumFrontend && numTLSConfs == test.expectedNumTLSConf { + return + } + case <-timeout: + t.Fatal("timeout while waiting for config") + } + } + }) + } +} + +func TestErrorWhenEmptyConfig(t *testing.T) { + provider := &Provider{} + configChan := make(chan types.ConfigMessage) + errorChan := make(chan struct{}) + go func() { + err := provider.Provide(configChan, safe.NewPool(context.Background())) + assert.Error(t, err) + close(errorChan) + }() + + timeout := time.After(time.Second) + select { + case <-configChan: + t.Fatal("We should not receive config message") + case <-timeout: + t.Fatal("timeout while waiting for config") + case <-errorChan: + } +} + +func createProvider(t *testing.T, test ProvideTestCase, watch bool) (*Provider, func()) { + tempDir := createTempDir(t, "testdir") + + provider := &Provider{} + provider.Watch = watch + + if len(test.directoryContent) > 0 { + if 
!watch { + for _, fileContent := range test.directoryContent { + createRandomFile(t, tempDir, fileContent) + } + } + provider.Directory = tempDir + } + + if len(test.fileContent) > 0 { + if watch { + test.fileContent = "" + } + filename := createRandomFile(t, tempDir, test.fileContent) + provider.Filename = filename.Name() + + } + + if len(test.traefikFileContent) > 0 { + if watch { + test.traefikFileContent = "" + } + filename := createRandomFile(t, tempDir, test.traefikFileContent) + provider.TraefikFile = filename.Name() + } + + return provider, func() { + os.Remove(tempDir) + } +} diff --git a/provider/kubernetes/annotations.go b/old/provider/kubernetes/annotations.go similarity index 99% rename from provider/kubernetes/annotations.go rename to old/provider/kubernetes/annotations.go index f3fa740c6..b4dd3afc4 100644 --- a/provider/kubernetes/annotations.go +++ b/old/provider/kubernetes/annotations.go @@ -3,7 +3,7 @@ package kubernetes import ( "strconv" - "github.com/containous/traefik/provider/label" + "github.com/containous/traefik/old/provider/label" ) const ( diff --git a/provider/kubernetes/annotations_test.go b/old/provider/kubernetes/annotations_test.go similarity index 94% rename from provider/kubernetes/annotations_test.go rename to old/provider/kubernetes/annotations_test.go index c53c0fdcd..3f4edc960 100644 --- a/provider/kubernetes/annotations_test.go +++ b/old/provider/kubernetes/annotations_test.go @@ -3,7 +3,7 @@ package kubernetes import ( "testing" - "github.com/containous/traefik/provider/label" + "github.com/containous/traefik/old/provider/label" "github.com/stretchr/testify/assert" ) diff --git a/provider/kubernetes/builder_configuration_test.go b/old/provider/kubernetes/builder_configuration_test.go similarity index 99% rename from provider/kubernetes/builder_configuration_test.go rename to old/provider/kubernetes/builder_configuration_test.go index 2a252601e..22ec5d0a1 100644 --- a/provider/kubernetes/builder_configuration_test.go +++ b/old/provider/kubernetes/builder_configuration_test.go @@ -5,9 +5,9 @@ import ( "time" "github.com/containous/flaeg/parse" - "github.com/containous/traefik/provider/label" + "github.com/containous/traefik/old/provider/label" + "github.com/containous/traefik/old/types" "github.com/containous/traefik/tls" - "github.com/containous/traefik/types" "github.com/stretchr/testify/assert" ) diff --git a/provider/kubernetes/builder_endpoint_test.go b/old/provider/kubernetes/builder_endpoint_test.go similarity index 100% rename from provider/kubernetes/builder_endpoint_test.go rename to old/provider/kubernetes/builder_endpoint_test.go diff --git a/provider/kubernetes/builder_ingress_test.go b/old/provider/kubernetes/builder_ingress_test.go similarity index 100% rename from provider/kubernetes/builder_ingress_test.go rename to old/provider/kubernetes/builder_ingress_test.go diff --git a/provider/kubernetes/builder_service_test.go b/old/provider/kubernetes/builder_service_test.go similarity index 100% rename from provider/kubernetes/builder_service_test.go rename to old/provider/kubernetes/builder_service_test.go diff --git a/provider/kubernetes/client.go b/old/provider/kubernetes/client.go similarity index 99% rename from provider/kubernetes/client.go rename to old/provider/kubernetes/client.go index 78f71c5d8..35698f980 100644 --- a/provider/kubernetes/client.go +++ b/old/provider/kubernetes/client.go @@ -6,7 +6,7 @@ import ( "io/ioutil" "time" - "github.com/containous/traefik/log" + "github.com/containous/traefik/old/log" corev1 
"k8s.io/api/core/v1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" kubeerror "k8s.io/apimachinery/pkg/api/errors" diff --git a/provider/kubernetes/client_mock_test.go b/old/provider/kubernetes/client_mock_test.go similarity index 100% rename from provider/kubernetes/client_mock_test.go rename to old/provider/kubernetes/client_mock_test.go diff --git a/provider/kubernetes/client_test.go b/old/provider/kubernetes/client_test.go similarity index 100% rename from provider/kubernetes/client_test.go rename to old/provider/kubernetes/client_test.go diff --git a/provider/kubernetes/kubernetes.go b/old/provider/kubernetes/kubernetes.go similarity index 99% rename from provider/kubernetes/kubernetes.go rename to old/provider/kubernetes/kubernetes.go index 82bf59047..9377eee7b 100644 --- a/provider/kubernetes/kubernetes.go +++ b/old/provider/kubernetes/kubernetes.go @@ -16,12 +16,12 @@ import ( "github.com/cenk/backoff" "github.com/containous/traefik/job" - "github.com/containous/traefik/log" - "github.com/containous/traefik/provider" - "github.com/containous/traefik/provider/label" + "github.com/containous/traefik/old/log" + "github.com/containous/traefik/old/provider" + "github.com/containous/traefik/old/provider/label" + "github.com/containous/traefik/old/types" "github.com/containous/traefik/safe" "github.com/containous/traefik/tls" - "github.com/containous/traefik/types" "gopkg.in/yaml.v2" corev1 "k8s.io/api/core/v1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" diff --git a/provider/kubernetes/kubernetes_test.go b/old/provider/kubernetes/kubernetes_test.go similarity index 99% rename from provider/kubernetes/kubernetes_test.go rename to old/provider/kubernetes/kubernetes_test.go index 819f4770d..b52a7ed6c 100644 --- a/provider/kubernetes/kubernetes_test.go +++ b/old/provider/kubernetes/kubernetes_test.go @@ -8,8 +8,8 @@ import ( "testing" "time" + "github.com/containous/traefik/old/types" "github.com/containous/traefik/tls" - "github.com/containous/traefik/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" diff --git a/provider/kubernetes/namespace.go b/old/provider/kubernetes/namespace.go similarity index 100% rename from provider/kubernetes/namespace.go rename to old/provider/kubernetes/namespace.go diff --git a/provider/kubernetes/percentage.go b/old/provider/kubernetes/percentage.go similarity index 100% rename from provider/kubernetes/percentage.go rename to old/provider/kubernetes/percentage.go diff --git a/provider/kubernetes/percentage_test.go b/old/provider/kubernetes/percentage_test.go similarity index 100% rename from provider/kubernetes/percentage_test.go rename to old/provider/kubernetes/percentage_test.go diff --git a/provider/kubernetes/weight_allocator.go b/old/provider/kubernetes/weight_allocator.go similarity index 99% rename from provider/kubernetes/weight_allocator.go rename to old/provider/kubernetes/weight_allocator.go index 1b13dbe12..60773256a 100644 --- a/provider/kubernetes/weight_allocator.go +++ b/old/provider/kubernetes/weight_allocator.go @@ -5,7 +5,7 @@ import ( "sort" "strings" - "github.com/containous/traefik/provider/label" + "github.com/containous/traefik/old/provider/label" "gopkg.in/yaml.v2" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" ) diff --git a/provider/kubernetes/weight_allocator_test.go b/old/provider/kubernetes/weight_allocator_test.go similarity index 100% rename from provider/kubernetes/weight_allocator_test.go rename to old/provider/kubernetes/weight_allocator_test.go diff 
--git a/provider/kv/filler_test.go b/old/provider/kv/filler_test.go similarity index 100% rename from provider/kv/filler_test.go rename to old/provider/kv/filler_test.go diff --git a/provider/kv/keynames.go b/old/provider/kv/keynames.go similarity index 100% rename from provider/kv/keynames.go rename to old/provider/kv/keynames.go diff --git a/provider/kv/kv.go b/old/provider/kv/kv.go similarity index 96% rename from provider/kv/kv.go rename to old/provider/kv/kv.go index 23c70e382..0e5fb2fb3 100644 --- a/provider/kv/kv.go +++ b/old/provider/kv/kv.go @@ -10,10 +10,10 @@ import ( "github.com/abronan/valkeyrie/store" "github.com/cenk/backoff" "github.com/containous/traefik/job" - "github.com/containous/traefik/log" - "github.com/containous/traefik/provider" + "github.com/containous/traefik/old/log" + "github.com/containous/traefik/old/provider" + "github.com/containous/traefik/old/types" "github.com/containous/traefik/safe" - "github.com/containous/traefik/types" ) // Provider holds common configurations of key-value providers. diff --git a/provider/kv/kv_config.go b/old/provider/kv/kv_config.go similarity index 99% rename from provider/kv/kv_config.go rename to old/provider/kv/kv_config.go index 5444b5ab3..a8b379ba8 100644 --- a/provider/kv/kv_config.go +++ b/old/provider/kv/kv_config.go @@ -12,10 +12,10 @@ import ( "github.com/BurntSushi/ty/fun" "github.com/abronan/valkeyrie/store" "github.com/containous/flaeg/parse" - "github.com/containous/traefik/log" - "github.com/containous/traefik/provider/label" + "github.com/containous/traefik/old/log" + "github.com/containous/traefik/old/provider/label" + "github.com/containous/traefik/old/types" "github.com/containous/traefik/tls" - "github.com/containous/traefik/types" ) func (p *Provider) buildConfiguration() *types.Configuration { diff --git a/provider/kv/kv_config_test.go b/old/provider/kv/kv_config_test.go similarity index 99% rename from provider/kv/kv_config_test.go rename to old/provider/kv/kv_config_test.go index 6f4533b75..c0b46cdda 100644 --- a/provider/kv/kv_config_test.go +++ b/old/provider/kv/kv_config_test.go @@ -8,9 +8,9 @@ import ( "github.com/abronan/valkeyrie/store" "github.com/containous/flaeg/parse" - "github.com/containous/traefik/provider/label" + "github.com/containous/traefik/old/provider/label" + "github.com/containous/traefik/old/types" "github.com/containous/traefik/tls" - "github.com/containous/traefik/types" "github.com/stretchr/testify/assert" ) diff --git a/provider/kv/kv_mock_test.go b/old/provider/kv/kv_mock_test.go similarity index 100% rename from provider/kv/kv_mock_test.go rename to old/provider/kv/kv_mock_test.go diff --git a/provider/kv/kv_test.go b/old/provider/kv/kv_test.go similarity index 92% rename from provider/kv/kv_test.go rename to old/provider/kv/kv_test.go index 7e14af4f8..f688fd9a0 100644 --- a/provider/kv/kv_test.go +++ b/old/provider/kv/kv_test.go @@ -5,8 +5,8 @@ import ( "time" "github.com/abronan/valkeyrie/store" - "github.com/containous/traefik/log" - "github.com/containous/traefik/types" + "github.com/containous/traefik/old/log" + "github.com/containous/traefik/old/types" ) func TestKvWatchTree(t *testing.T) { diff --git a/provider/label/label.go b/old/provider/label/label.go similarity index 99% rename from provider/label/label.go rename to old/provider/label/label.go index b0069eed3..1a69d76ba 100644 --- a/provider/label/label.go +++ b/old/provider/label/label.go @@ -7,7 +7,7 @@ import ( "strconv" "strings" - "github.com/containous/traefik/log" + "github.com/containous/traefik/old/log" ) 
const ( diff --git a/provider/label/label_test.go b/old/provider/label/label_test.go similarity index 100% rename from provider/label/label_test.go rename to old/provider/label/label_test.go diff --git a/provider/label/names.go b/old/provider/label/names.go similarity index 100% rename from provider/label/names.go rename to old/provider/label/names.go diff --git a/provider/label/partial.go b/old/provider/label/partial.go similarity index 99% rename from provider/label/partial.go rename to old/provider/label/partial.go index 66f6ca213..5457bb08e 100644 --- a/provider/label/partial.go +++ b/old/provider/label/partial.go @@ -7,8 +7,8 @@ import ( "strings" "github.com/containous/flaeg/parse" - "github.com/containous/traefik/log" - "github.com/containous/traefik/types" + "github.com/containous/traefik/old/log" + "github.com/containous/traefik/old/types" ) // GetWhiteList Create white list from labels diff --git a/provider/label/partial_test.go b/old/provider/label/partial_test.go similarity index 99% rename from provider/label/partial_test.go rename to old/provider/label/partial_test.go index abaf24d69..64a421be8 100644 --- a/provider/label/partial_test.go +++ b/old/provider/label/partial_test.go @@ -5,7 +5,7 @@ import ( "time" "github.com/containous/flaeg/parse" - "github.com/containous/traefik/types" + "github.com/containous/traefik/old/types" "github.com/stretchr/testify/assert" ) diff --git a/provider/label/segment.go b/old/provider/label/segment.go similarity index 98% rename from provider/label/segment.go rename to old/provider/label/segment.go index a0853d420..06a4cd85b 100644 --- a/provider/label/segment.go +++ b/old/provider/label/segment.go @@ -4,7 +4,7 @@ import ( "regexp" "strings" - "github.com/containous/traefik/log" + "github.com/containous/traefik/old/log" ) var ( diff --git a/provider/label/segment_test.go b/old/provider/label/segment_test.go similarity index 100% rename from provider/label/segment_test.go rename to old/provider/label/segment_test.go diff --git a/provider/marathon/builder_test.go b/old/provider/marathon/builder_test.go similarity index 98% rename from provider/marathon/builder_test.go rename to old/provider/marathon/builder_test.go index 86a190e2a..00021cd54 100644 --- a/provider/marathon/builder_test.go +++ b/old/provider/marathon/builder_test.go @@ -4,7 +4,7 @@ import ( "strings" "time" - "github.com/containous/traefik/provider/label" + "github.com/containous/traefik/old/provider/label" "github.com/gambol99/go-marathon" ) diff --git a/provider/marathon/config.go b/old/provider/marathon/config.go similarity index 98% rename from provider/marathon/config.go rename to old/provider/marathon/config.go index f64151fd3..a74a18f06 100644 --- a/provider/marathon/config.go +++ b/old/provider/marathon/config.go @@ -9,10 +9,10 @@ import ( "strings" "text/template" - "github.com/containous/traefik/log" - "github.com/containous/traefik/provider" - "github.com/containous/traefik/provider/label" - "github.com/containous/traefik/types" + "github.com/containous/traefik/old/log" + "github.com/containous/traefik/old/provider" + "github.com/containous/traefik/old/provider/label" + "github.com/containous/traefik/old/types" "github.com/gambol99/go-marathon" ) diff --git a/provider/marathon/config_segment_test.go b/old/provider/marathon/config_segment_test.go similarity index 99% rename from provider/marathon/config_segment_test.go rename to old/provider/marathon/config_segment_test.go index dfaaf98d0..56cd04701 100644 --- a/provider/marathon/config_segment_test.go +++ 
b/old/provider/marathon/config_segment_test.go @@ -5,8 +5,8 @@ import ( "time" "github.com/containous/flaeg/parse" - "github.com/containous/traefik/provider/label" - "github.com/containous/traefik/types" + "github.com/containous/traefik/old/provider/label" + "github.com/containous/traefik/old/types" "github.com/gambol99/go-marathon" "github.com/stretchr/testify/assert" ) diff --git a/provider/marathon/config_test.go b/old/provider/marathon/config_test.go similarity index 99% rename from provider/marathon/config_test.go rename to old/provider/marathon/config_test.go index a7df27aba..e1074d984 100644 --- a/provider/marathon/config_test.go +++ b/old/provider/marathon/config_test.go @@ -6,8 +6,8 @@ import ( "time" "github.com/containous/flaeg/parse" - "github.com/containous/traefik/provider/label" - "github.com/containous/traefik/types" + "github.com/containous/traefik/old/provider/label" + "github.com/containous/traefik/old/types" "github.com/gambol99/go-marathon" "github.com/stretchr/testify/assert" ) diff --git a/provider/marathon/convert_types.go b/old/provider/marathon/convert_types.go similarity index 100% rename from provider/marathon/convert_types.go rename to old/provider/marathon/convert_types.go diff --git a/provider/marathon/fake_client_test.go b/old/provider/marathon/fake_client_test.go similarity index 90% rename from provider/marathon/fake_client_test.go rename to old/provider/marathon/fake_client_test.go index 42f5e0235..70e51d830 100644 --- a/provider/marathon/fake_client_test.go +++ b/old/provider/marathon/fake_client_test.go @@ -3,7 +3,7 @@ package marathon import ( "errors" - "github.com/containous/traefik/provider/marathon/mocks" + "github.com/containous/traefik/old/provider/marathon/mocks" "github.com/gambol99/go-marathon" "github.com/stretchr/testify/mock" ) diff --git a/provider/marathon/marathon.go b/old/provider/marathon/marathon.go similarity index 98% rename from provider/marathon/marathon.go rename to old/provider/marathon/marathon.go index c07856d79..8f74e00f9 100644 --- a/provider/marathon/marathon.go +++ b/old/provider/marathon/marathon.go @@ -9,10 +9,10 @@ import ( "github.com/cenk/backoff" "github.com/containous/flaeg/parse" "github.com/containous/traefik/job" - "github.com/containous/traefik/log" - "github.com/containous/traefik/provider" + "github.com/containous/traefik/old/log" + "github.com/containous/traefik/old/provider" + "github.com/containous/traefik/old/types" "github.com/containous/traefik/safe" - "github.com/containous/traefik/types" "github.com/gambol99/go-marathon" "github.com/sirupsen/logrus" ) diff --git a/provider/marathon/mocks/Marathon.go b/old/provider/marathon/mocks/Marathon.go similarity index 100% rename from provider/marathon/mocks/Marathon.go rename to old/provider/marathon/mocks/Marathon.go diff --git a/provider/marathon/readiness.go b/old/provider/marathon/readiness.go similarity index 99% rename from provider/marathon/readiness.go rename to old/provider/marathon/readiness.go index 63b7fbfe3..d6b92b315 100644 --- a/provider/marathon/readiness.go +++ b/old/provider/marathon/readiness.go @@ -3,7 +3,7 @@ package marathon import ( "time" - "github.com/containous/traefik/log" + "github.com/containous/traefik/old/log" "github.com/gambol99/go-marathon" ) diff --git a/provider/marathon/readiness_test.go b/old/provider/marathon/readiness_test.go similarity index 100% rename from provider/marathon/readiness_test.go rename to old/provider/marathon/readiness_test.go diff --git a/provider/mesos/config.go b/old/provider/mesos/config.go similarity 
index 98% rename from provider/mesos/config.go rename to old/provider/mesos/config.go index 4ac0094e8..71b2325d6 100644 --- a/provider/mesos/config.go +++ b/old/provider/mesos/config.go @@ -8,10 +8,10 @@ import ( "strings" "text/template" - "github.com/containous/traefik/log" - "github.com/containous/traefik/provider" - "github.com/containous/traefik/provider/label" - "github.com/containous/traefik/types" + "github.com/containous/traefik/old/log" + "github.com/containous/traefik/old/provider" + "github.com/containous/traefik/old/provider/label" + "github.com/containous/traefik/old/types" "github.com/mesosphere/mesos-dns/records/state" ) diff --git a/provider/mesos/config_segment_test.go b/old/provider/mesos/config_segment_test.go similarity index 99% rename from provider/mesos/config_segment_test.go rename to old/provider/mesos/config_segment_test.go index a5ef74da5..966ca8bbd 100644 --- a/provider/mesos/config_segment_test.go +++ b/old/provider/mesos/config_segment_test.go @@ -5,8 +5,8 @@ import ( "time" "github.com/containous/flaeg/parse" - "github.com/containous/traefik/provider/label" - "github.com/containous/traefik/types" + "github.com/containous/traefik/old/provider/label" + "github.com/containous/traefik/old/types" "github.com/mesosphere/mesos-dns/records/state" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" diff --git a/provider/mesos/config_test.go b/old/provider/mesos/config_test.go similarity index 99% rename from provider/mesos/config_test.go rename to old/provider/mesos/config_test.go index 6798b872f..86e483da4 100644 --- a/provider/mesos/config_test.go +++ b/old/provider/mesos/config_test.go @@ -5,8 +5,8 @@ import ( "time" "github.com/containous/flaeg/parse" - "github.com/containous/traefik/provider/label" - "github.com/containous/traefik/types" + "github.com/containous/traefik/old/provider/label" + "github.com/containous/traefik/old/types" "github.com/mesosphere/mesos-dns/records/state" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" diff --git a/provider/mesos/mesos.go b/old/provider/mesos/mesos.go similarity index 97% rename from provider/mesos/mesos.go rename to old/provider/mesos/mesos.go index 4f44040f2..34aedd419 100644 --- a/provider/mesos/mesos.go +++ b/old/provider/mesos/mesos.go @@ -7,10 +7,10 @@ import ( "github.com/cenk/backoff" "github.com/containous/traefik/job" - "github.com/containous/traefik/log" - "github.com/containous/traefik/provider" + "github.com/containous/traefik/old/log" + "github.com/containous/traefik/old/provider" + "github.com/containous/traefik/old/types" "github.com/containous/traefik/safe" - "github.com/containous/traefik/types" "github.com/mesos/mesos-go/detector" "github.com/mesosphere/mesos-dns/records" "github.com/mesosphere/mesos-dns/records/state" diff --git a/provider/mesos/mesos_helper_test.go b/old/provider/mesos/mesos_helper_test.go similarity index 98% rename from provider/mesos/mesos_helper_test.go rename to old/provider/mesos/mesos_helper_test.go index 4fd04f941..dc1881923 100644 --- a/provider/mesos/mesos_helper_test.go +++ b/old/provider/mesos/mesos_helper_test.go @@ -4,7 +4,7 @@ import ( "strings" "testing" - "github.com/containous/traefik/provider/label" + "github.com/containous/traefik/old/provider/label" "github.com/mesosphere/mesos-dns/records/state" "github.com/stretchr/testify/assert" ) diff --git a/provider/mesos/mesos_test.go b/old/provider/mesos/mesos_test.go similarity index 100% rename from provider/mesos/mesos_test.go rename to old/provider/mesos/mesos_test.go 
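The next diff entry adds old/provider/provider.go, which carries the Provider interface and the BaseProvider template helpers (sprig functions plus normalize/split) over to the old/ tree. A rough usage sketch of those helpers, assuming that API; the inline TOML template and field values here are made up for illustration:

package example

import (
	"text/template"

	"github.com/containous/traefik/old/provider"
	"github.com/containous/traefik/old/types"
)

// buildExampleConfiguration renders an inline TOML template the way the moved
// providers do through BaseProvider; real providers load their templates from
// autogen/gentemplates or from an override file rather than a literal string.
func buildExampleConfiguration() (*types.Configuration, error) {
	p := provider.BaseProvider{
		// When set, CreateConfiguration logs both the template and the rendered result.
		DebugLogGeneratedTemplate: true,
	}

	tmpl := `
[backends."backend-{{ .Name | normalize }}"]
  [backends."backend-{{ .Name | normalize }}".servers.server1]
    url = "http://127.0.0.1:80"
`

	// normalize comes from the default func map and turns "my.app" into "my-app".
	return p.CreateConfiguration(tmpl, template.FuncMap{}, struct{ Name string }{Name: "my.app"})
}

This mirrors what the old file provider's loadFileConfig, added earlier in this change, does when parseTemplate is true.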
diff --git a/old/provider/provider.go b/old/provider/provider.go new file mode 100644 index 000000000..0cf471a0d --- /dev/null +++ b/old/provider/provider.go @@ -0,0 +1,149 @@ +package provider + +import ( + "bytes" + "io/ioutil" + "strings" + "text/template" + "unicode" + + "github.com/BurntSushi/toml" + "github.com/Masterminds/sprig" + "github.com/containous/traefik/autogen/gentemplates" + "github.com/containous/traefik/old/log" + "github.com/containous/traefik/old/types" + "github.com/containous/traefik/safe" +) + +// Provider defines methods of a provider. +type Provider interface { + // Provide allows the provider to provide configurations to traefik + // using the given configuration channel. + Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error + Init(constraints types.Constraints) error +} + +// BaseProvider should be inherited by providers +type BaseProvider struct { + Watch bool `description:"Watch provider" export:"true"` + Filename string `description:"Override default configuration template. For advanced users :)" export:"true"` + Constraints types.Constraints `description:"Filter services by constraint, matching with Traefik tags." export:"true"` + Trace bool `description:"Display additional provider logs (if available)." export:"true"` + DebugLogGeneratedTemplate bool `description:"Enable debug logging of generated configuration template." export:"true"` +} + +// Init for compatibility reason the BaseProvider implements an empty Init +func (p *BaseProvider) Init(constraints types.Constraints) error { + p.Constraints = append(p.Constraints, constraints...) + return nil +} + +// MatchConstraints must match with EVERY single constraint +// returns first constraint that do not match or nil +func (p *BaseProvider) MatchConstraints(tags []string) (bool, *types.Constraint) { + // if there is no tags and no constraints, filtering is disabled + if len(tags) == 0 && len(p.Constraints) == 0 { + return true, nil + } + + for _, constraint := range p.Constraints { + // xor: if ok and constraint.MustMatch are equal, then no tag is currently matching with the constraint + if ok := constraint.MatchConstraintWithAtLeastOneTag(tags); ok != constraint.MustMatch { + return false, constraint + } + } + + // If no constraint or every constraints matching + return true, nil +} + +// GetConfiguration return the provider configuration from default template (file or content) or overrode template file +func (p *BaseProvider) GetConfiguration(defaultTemplate string, funcMap template.FuncMap, templateObjects interface{}) (*types.Configuration, error) { + tmplContent, err := p.getTemplateContent(defaultTemplate) + if err != nil { + return nil, err + } + return p.CreateConfiguration(tmplContent, funcMap, templateObjects) +} + +// CreateConfiguration create a provider configuration from content using templating +func (p *BaseProvider) CreateConfiguration(tmplContent string, funcMap template.FuncMap, templateObjects interface{}) (*types.Configuration, error) { + var defaultFuncMap = sprig.TxtFuncMap() + // tolower is deprecated in favor of sprig's lower function + defaultFuncMap["tolower"] = strings.ToLower + defaultFuncMap["normalize"] = Normalize + defaultFuncMap["split"] = split + for funcID, funcElement := range funcMap { + defaultFuncMap[funcID] = funcElement + } + + tmpl := template.New(p.Filename).Funcs(defaultFuncMap) + + _, err := tmpl.Parse(tmplContent) + if err != nil { + return nil, err + } + + var buffer bytes.Buffer + err = tmpl.Execute(&buffer, templateObjects) + if err 
!= nil { + return nil, err + } + + var renderedTemplate = buffer.String() + if p.DebugLogGeneratedTemplate { + log.Debugf("Template content: %s", tmplContent) + log.Debugf("Rendering results: %s", renderedTemplate) + } + return p.DecodeConfiguration(renderedTemplate) +} + +// DecodeConfiguration Decode a *types.Configuration from a content +func (p *BaseProvider) DecodeConfiguration(content string) (*types.Configuration, error) { + configuration := new(types.Configuration) + if _, err := toml.Decode(content, configuration); err != nil { + return nil, err + } + return configuration, nil +} + +func (p *BaseProvider) getTemplateContent(defaultTemplateFile string) (string, error) { + if len(p.Filename) > 0 { + buf, err := ioutil.ReadFile(p.Filename) + if err != nil { + return "", err + } + return string(buf), nil + } + + if strings.HasSuffix(defaultTemplateFile, ".tmpl") { + buf, err := gentemplates.Asset(defaultTemplateFile) + if err != nil { + return "", err + } + return string(buf), nil + } + + return defaultTemplateFile, nil +} + +func split(sep, s string) []string { + return strings.Split(s, sep) +} + +// Normalize transform a string that work with the rest of traefik +// Replace '.' with '-' in quoted keys because of this issue https://github.com/BurntSushi/toml/issues/78 +func Normalize(name string) string { + fargs := func(c rune) bool { + return !unicode.IsLetter(c) && !unicode.IsNumber(c) + } + // get function + return strings.Join(strings.FieldsFunc(name, fargs), "-") +} + +// ReverseStringSlice invert the order of the given slice of string +func ReverseStringSlice(slice *[]string) { + for i, j := 0, len(*slice)-1; i < j; i, j = i+1, j-1 { + (*slice)[i], (*slice)[j] = (*slice)[j], (*slice)[i] + } +} diff --git a/provider/provider_test.go b/old/provider/provider_test.go similarity index 97% rename from provider/provider_test.go rename to old/provider/provider_test.go index b1b1ddaec..3744da038 100644 --- a/provider/provider_test.go +++ b/old/provider/provider_test.go @@ -7,7 +7,7 @@ import ( "testing" "text/template" - "github.com/containous/traefik/types" + "github.com/containous/traefik/old/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -22,6 +22,7 @@ func (p *myProvider) Foo() string { } func TestConfigurationErrors(t *testing.T) { + t.Skip("deprecated") templateErrorFile, err := ioutil.TempFile("", "provider-configuration-error") require.NoError(t, err) @@ -98,6 +99,7 @@ func TestConfigurationErrors(t *testing.T) { } func TestGetConfiguration(t *testing.T) { + t.Skip("deprecated") templateFile, err := ioutil.TempFile("", "provider-configuration") require.NoError(t, err) @@ -138,6 +140,7 @@ func TestGetConfiguration(t *testing.T) { } func TestGetConfigurationReturnsCorrectMaxConnConfiguration(t *testing.T) { + t.Skip("deprecated") templateFile, err := ioutil.TempFile("", "provider-configuration") require.NoError(t, err) @@ -168,6 +171,7 @@ func TestGetConfigurationReturnsCorrectMaxConnConfiguration(t *testing.T) { } func TestNilClientTLS(t *testing.T) { + t.Skip("deprecated") p := &myProvider{ BaseProvider: BaseProvider{ Filename: "", @@ -179,6 +183,7 @@ func TestNilClientTLS(t *testing.T) { } func TestInsecureSkipVerifyClientTLS(t *testing.T) { + t.Skip("deprecated") p := &myProvider{ BaseProvider: BaseProvider{ Filename: "", @@ -195,6 +200,7 @@ func TestInsecureSkipVerifyClientTLS(t *testing.T) { } func TestInsecureSkipVerifyFalseClientTLS(t *testing.T) { + t.Skip("deprecated") p := &myProvider{ BaseProvider: BaseProvider{ Filename: "", @@ 
-209,6 +215,7 @@ func TestInsecureSkipVerifyFalseClientTLS(t *testing.T) { } func TestMatchingConstraints(t *testing.T) { + t.Skip("deprecated") testCases := []struct { desc string constraints types.Constraints @@ -311,6 +318,7 @@ func TestMatchingConstraints(t *testing.T) { } func TestDefaultFuncMap(t *testing.T) { + t.Skip("deprecated") templateFile, err := ioutil.TempFile("", "provider-configuration") require.NoError(t, err) defer os.RemoveAll(templateFile.Name()) @@ -357,6 +365,7 @@ func TestDefaultFuncMap(t *testing.T) { } func TestSprigFunctions(t *testing.T) { + t.Skip("deprecated") templateFile, err := ioutil.TempFile("", "provider-configuration") require.NoError(t, err) @@ -397,6 +406,7 @@ func TestSprigFunctions(t *testing.T) { } func TestBaseProvider_GetConfiguration(t *testing.T) { + t.Skip("deprecated") baseProvider := BaseProvider{} testCases := []struct { @@ -427,6 +437,7 @@ func TestBaseProvider_GetConfiguration(t *testing.T) { } func TestNormalize(t *testing.T) { + t.Skip("deprecated") testCases := []struct { desc string name string @@ -466,6 +477,7 @@ func TestNormalize(t *testing.T) { } func readTemplateFile(t *testing.T, path string) string { + t.Skip("deprecated") t.Helper() expectedContent, err := ioutil.ReadFile(path) if err != nil { diff --git a/provider/rancher/api.go b/old/provider/rancher/api.go similarity index 98% rename from provider/rancher/api.go rename to old/provider/rancher/api.go index a3e81b6cb..7610cdd1f 100644 --- a/provider/rancher/api.go +++ b/old/provider/rancher/api.go @@ -7,9 +7,9 @@ import ( "github.com/cenk/backoff" "github.com/containous/traefik/job" - "github.com/containous/traefik/log" + "github.com/containous/traefik/old/log" + "github.com/containous/traefik/old/types" "github.com/containous/traefik/safe" - "github.com/containous/traefik/types" "github.com/mitchellh/mapstructure" rancher "github.com/rancher/go-rancher/v2" ) diff --git a/provider/rancher/config.go b/old/provider/rancher/config.go similarity index 97% rename from provider/rancher/config.go rename to old/provider/rancher/config.go index 0346ab2a5..28f6863c7 100644 --- a/provider/rancher/config.go +++ b/old/provider/rancher/config.go @@ -8,10 +8,10 @@ import ( "text/template" "github.com/BurntSushi/ty/fun" - "github.com/containous/traefik/log" - "github.com/containous/traefik/provider" - "github.com/containous/traefik/provider/label" - "github.com/containous/traefik/types" + "github.com/containous/traefik/old/log" + "github.com/containous/traefik/old/provider" + "github.com/containous/traefik/old/provider/label" + "github.com/containous/traefik/old/types" ) func (p *Provider) buildConfiguration(services []rancherData) *types.Configuration { diff --git a/provider/rancher/config_test.go b/old/provider/rancher/config_test.go similarity index 99% rename from provider/rancher/config_test.go rename to old/provider/rancher/config_test.go index 10212a0ab..23e4a8c68 100644 --- a/provider/rancher/config_test.go +++ b/old/provider/rancher/config_test.go @@ -5,8 +5,8 @@ import ( "time" "github.com/containous/flaeg/parse" - "github.com/containous/traefik/provider/label" - "github.com/containous/traefik/types" + "github.com/containous/traefik/old/provider/label" + "github.com/containous/traefik/old/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/provider/rancher/metadata.go b/old/provider/rancher/metadata.go similarity index 97% rename from provider/rancher/metadata.go rename to old/provider/rancher/metadata.go index 8de2f255d..ea3c6a95f 
100644 --- a/provider/rancher/metadata.go +++ b/old/provider/rancher/metadata.go @@ -7,9 +7,9 @@ import ( "github.com/cenk/backoff" "github.com/containous/traefik/job" - "github.com/containous/traefik/log" + "github.com/containous/traefik/old/log" + "github.com/containous/traefik/old/types" "github.com/containous/traefik/safe" - "github.com/containous/traefik/types" "github.com/sirupsen/logrus" rancher "github.com/rancher/go-rancher-metadata/metadata" diff --git a/provider/rancher/rancher.go b/old/provider/rancher/rancher.go similarity index 95% rename from provider/rancher/rancher.go rename to old/provider/rancher/rancher.go index 3fe96b21c..f200cea49 100644 --- a/provider/rancher/rancher.go +++ b/old/provider/rancher/rancher.go @@ -3,10 +3,10 @@ package rancher import ( "fmt" - "github.com/containous/traefik/log" - "github.com/containous/traefik/provider" + "github.com/containous/traefik/old/log" + "github.com/containous/traefik/old/provider" + "github.com/containous/traefik/old/types" "github.com/containous/traefik/safe" - "github.com/containous/traefik/types" ) const ( diff --git a/provider/rest/rest.go b/old/provider/rest/rest.go similarity index 96% rename from provider/rest/rest.go rename to old/provider/rest/rest.go index f3ffe5136..6d553792a 100644 --- a/provider/rest/rest.go +++ b/old/provider/rest/rest.go @@ -7,9 +7,9 @@ import ( "net/http" "github.com/containous/mux" - "github.com/containous/traefik/log" + "github.com/containous/traefik/old/log" + "github.com/containous/traefik/old/types" "github.com/containous/traefik/safe" - "github.com/containous/traefik/types" "github.com/unrolled/render" ) diff --git a/provider/zk/zk.go b/old/provider/zk/zk.go similarity index 88% rename from provider/zk/zk.go rename to old/provider/zk/zk.go index 7edc48c29..3a783a72f 100644 --- a/provider/zk/zk.go +++ b/old/provider/zk/zk.go @@ -5,10 +5,10 @@ import ( "github.com/abronan/valkeyrie/store" "github.com/abronan/valkeyrie/store/zookeeper" - "github.com/containous/traefik/provider" - "github.com/containous/traefik/provider/kv" + "github.com/containous/traefik/old/provider" + "github.com/containous/traefik/old/provider/kv" + "github.com/containous/traefik/old/types" "github.com/containous/traefik/safe" - "github.com/containous/traefik/types" ) var _ provider.Provider = (*Provider)(nil) diff --git a/old/types/dns_resolvers.go b/old/types/dns_resolvers.go new file mode 100644 index 000000000..dd96f7895 --- /dev/null +++ b/old/types/dns_resolvers.go @@ -0,0 +1,44 @@ +package types + +import ( + "fmt" + "strings" +) + +// DNSResolvers is a list of DNSes that we will try to resolve the challenged FQDN against +type DNSResolvers []string + +// String is the method to format the flag's value, part of the flag.Value interface. +// The String method's output will be used in diagnostics. +func (r *DNSResolvers) String() string { + return strings.Join(*r, ",") +} + +// Set is the method to set the flag value, part of the flag.Value interface. +// Set's argument is a string to be parsed to set the flag. +// It's a comma-separated list, so we split it. 
+func (r *DNSResolvers) Set(value string) error { + entryPoints := strings.Split(value, ",") + if len(entryPoints) == 0 { + return fmt.Errorf("wrong DNSResolvers format: %s", value) + } + for _, entryPoint := range entryPoints { + *r = append(*r, entryPoint) + } + return nil +} + +// Get return the DNSResolvers list +func (r *DNSResolvers) Get() interface{} { + return *r +} + +// SetValue sets the DNSResolvers list +func (r *DNSResolvers) SetValue(val interface{}) { + *r = val.(DNSResolvers) +} + +// Type is type of the struct +func (r *DNSResolvers) Type() string { + return "dnsresolvers" +} diff --git a/old/types/domain_test.go b/old/types/domain_test.go new file mode 100644 index 000000000..dc97c7971 --- /dev/null +++ b/old/types/domain_test.go @@ -0,0 +1,182 @@ +package types + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestDomain_ToStrArray(t *testing.T) { + testCases := []struct { + desc string + domain Domain + expected []string + }{ + { + desc: "with Main and SANs", + domain: Domain{ + Main: "foo.com", + SANs: []string{"bar.foo.com", "bir.foo.com"}, + }, + expected: []string{"foo.com", "bar.foo.com", "bir.foo.com"}, + }, + { + desc: "without SANs", + domain: Domain{ + Main: "foo.com", + }, + expected: []string{"foo.com"}, + }, + { + desc: "without Main", + domain: Domain{ + SANs: []string{"bar.foo.com", "bir.foo.com"}, + }, + expected: []string{"bar.foo.com", "bir.foo.com"}, + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + domains := test.domain.ToStrArray() + assert.EqualValues(t, test.expected, domains) + }) + } +} + +func TestDomain_Set(t *testing.T) { + testCases := []struct { + desc string + rawDomains []string + expected Domain + }{ + { + desc: "with 3 domains", + rawDomains: []string{"foo.com", "bar.foo.com", "bir.foo.com"}, + expected: Domain{ + Main: "foo.com", + SANs: []string{"bar.foo.com", "bir.foo.com"}, + }, + }, + { + desc: "with 1 domain", + rawDomains: []string{"foo.com"}, + expected: Domain{ + Main: "foo.com", + SANs: []string{}, + }, + }, + { + desc: "", + rawDomains: nil, + expected: Domain{}, + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + domain := Domain{} + domain.Set(test.rawDomains) + + assert.Equal(t, test.expected, domain) + }) + } +} + +func TestMatchDomain(t *testing.T) { + testCases := []struct { + desc string + certDomain string + domain string + expected bool + }{ + { + desc: "exact match", + certDomain: "traefik.wtf", + domain: "traefik.wtf", + expected: true, + }, + { + desc: "wildcard and root domain", + certDomain: "*.traefik.wtf", + domain: "traefik.wtf", + expected: false, + }, + { + desc: "wildcard and sub domain", + certDomain: "*.traefik.wtf", + domain: "sub.traefik.wtf", + expected: true, + }, + { + desc: "wildcard and sub sub domain", + certDomain: "*.traefik.wtf", + domain: "sub.sub.traefik.wtf", + expected: false, + }, + { + desc: "double wildcard and sub sub domain", + certDomain: "*.*.traefik.wtf", + domain: "sub.sub.traefik.wtf", + expected: true, + }, + { + desc: "sub sub domain and invalid wildcard", + certDomain: "sub.*.traefik.wtf", + domain: "sub.sub.traefik.wtf", + expected: false, + }, + { + desc: "sub sub domain and valid wildcard", + certDomain: "*.sub.traefik.wtf", + domain: "sub.sub.traefik.wtf", + expected: true, + }, + { + desc: "dot replaced by a cahr", + certDomain: "sub.sub.traefik.wtf", + domain: "sub.sub.traefikiwtf", + expected: false, + }, + { 
+ desc: "*", + certDomain: "*", + domain: "sub.sub.traefik.wtf", + expected: false, + }, + { + desc: "?", + certDomain: "?", + domain: "sub.sub.traefik.wtf", + expected: false, + }, + { + desc: "...................", + certDomain: "...................", + domain: "sub.sub.traefik.wtf", + expected: false, + }, + { + desc: "wildcard and *", + certDomain: "*.traefik.wtf", + domain: "*.*.traefik.wtf", + expected: false, + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + domains := MatchDomain(test.domain, test.certDomain) + assert.Equal(t, test.expected, domains) + }) + } +} diff --git a/old/types/domains.go b/old/types/domains.go new file mode 100644 index 000000000..2cace3f64 --- /dev/null +++ b/old/types/domains.go @@ -0,0 +1,88 @@ +package types + +import ( + "fmt" + "strings" +) + +// Domain holds a domain name with SANs +type Domain struct { + Main string + SANs []string +} + +// ToStrArray convert a domain into an array of strings +func (d *Domain) ToStrArray() []string { + var domains []string + if len(d.Main) > 0 { + domains = []string{d.Main} + } + return append(domains, d.SANs...) +} + +// Set sets a domains from an array of strings +func (d *Domain) Set(domains []string) { + if len(domains) > 0 { + d.Main = domains[0] + d.SANs = domains[1:] + } +} + +// Domains parse []Domain +type Domains []Domain + +// Set []Domain +func (ds *Domains) Set(str string) error { + fargs := func(c rune) bool { + return c == ',' || c == ';' + } + + // get function + slice := strings.FieldsFunc(str, fargs) + if len(slice) < 1 { + return fmt.Errorf("parse error ACME.Domain. Unable to parse %s", str) + } + + d := Domain{ + Main: slice[0], + } + + if len(slice) > 1 { + d.SANs = slice[1:] + } + + *ds = append(*ds, d) + return nil +} + +// Get []Domain +func (ds *Domains) Get() interface{} { return []Domain(*ds) } + +// String returns []Domain in string +func (ds *Domains) String() string { return fmt.Sprintf("%+v", *ds) } + +// SetValue sets []Domain into the parser +func (ds *Domains) SetValue(val interface{}) { + *ds = val.([]Domain) +} + +// MatchDomain return true if a domain match the cert domain +func MatchDomain(domain string, certDomain string) bool { + if domain == certDomain { + return true + } + + for len(certDomain) > 0 && certDomain[len(certDomain)-1] == '.' { + certDomain = certDomain[:len(certDomain)-1] + } + + labels := strings.Split(domain, ".") + for i := range labels { + labels[i] = "*" + candidate := strings.Join(labels, ".") + if certDomain == candidate { + return true + } + } + return false +} diff --git a/types/internal_router.go b/old/types/internal_router.go similarity index 100% rename from types/internal_router.go rename to old/types/internal_router.go diff --git a/old/types/logs.go b/old/types/logs.go new file mode 100644 index 000000000..118de9c5f --- /dev/null +++ b/old/types/logs.go @@ -0,0 +1,200 @@ +package types + +import ( + "fmt" + "strings" + + "github.com/containous/flaeg/parse" +) + +const ( + // AccessLogKeep is the keep string value + AccessLogKeep = "keep" + // AccessLogDrop is the drop string value + AccessLogDrop = "drop" + // AccessLogRedact is the redact string value + AccessLogRedact = "redact" +) + +// TraefikLog holds the configuration settings for the traefik logger. +type TraefikLog struct { + FilePath string `json:"file,omitempty" description:"Traefik log file path. 
Stdout is used when omitted or empty"`
+ Format string `json:"format,omitempty" description:"Traefik log format: json | common"`
+}
+
+// AccessLog holds the configuration settings for the access logger (middlewares/accesslog).
+type AccessLog struct {
+ FilePath string `json:"file,omitempty" description:"Access log file path. Stdout is used when omitted or empty" export:"true"`
+ Format string `json:"format,omitempty" description:"Access log format: json | common" export:"true"`
+ Filters *AccessLogFilters `json:"filters,omitempty" description:"Access log filters, used to keep only specific access logs" export:"true"`
+ Fields *AccessLogFields `json:"fields,omitempty" description:"AccessLogFields" export:"true"`
+ BufferingSize int64 `json:"bufferingSize,omitempty" description:"Number of access log lines to process in a buffered way. Default 0." export:"true"`
+}
+
+// AccessLogFilters holds filters configuration
+type AccessLogFilters struct {
+ StatusCodes StatusCodes `json:"statusCodes,omitempty" description:"Keep access logs with status codes in the specified range" export:"true"`
+ RetryAttempts bool `json:"retryAttempts,omitempty" description:"Keep access logs when at least one retry happened" export:"true"`
+ MinDuration parse.Duration `json:"duration,omitempty" description:"Keep access logs when request took longer than the specified duration" export:"true"`
+}
+
+// FieldHeaders holds configuration for access log headers
+type FieldHeaders struct {
+ DefaultMode string `json:"defaultMode,omitempty" description:"Default mode for fields: keep | drop | redact" export:"true"`
+ Names FieldHeaderNames `json:"names,omitempty" description:"Override mode for headers" export:"true"`
+}
+
+// StatusCodes holds status code ranges used to filter access logs
+type StatusCodes []string
+
+// Set adds string elements into the parser.
+// It splits str on , and ;
+func (s *StatusCodes) Set(str string) error {
+ fargs := func(c rune) bool {
+ return c == ',' || c == ';'
+ }
+ // get function
+ slice := strings.FieldsFunc(str, fargs)
+ *s = append(*s, slice...)
+ return nil
+}
+
+// Get returns the StatusCodes
+func (s *StatusCodes) Get() interface{} { return *s }
+
+// String returns the slice as a string
+func (s *StatusCodes) String() string { return fmt.Sprintf("%v", *s) }
+
+// SetValue sets StatusCodes into the parser
+func (s *StatusCodes) SetValue(val interface{}) {
+ *s = val.(StatusCodes)
+}
+
+// FieldNames holds a map of fields with their mode
+type FieldNames map[string]string
+
+// String is the method to format the flag's value, part of the flag.Value interface.
+// The String method's output will be used in diagnostics.
+func (f *FieldNames) String() string {
+ return fmt.Sprintf("%+v", *f)
+}
+
+// Get returns the FieldNames map
+func (f *FieldNames) Get() interface{} {
+ return *f
+}
+
+// Set is the method to set the flag value, part of the flag.Value interface.
+// Set's argument is a string to be parsed to set the flag.
+// It's a space-separated list, so we split it.
+func (f *FieldNames) Set(value string) error {
+ // When arguments are passed through YAML, escaped double quotes
+ // might be added to this string, and they would break the last
+ // key/value pair. This ensures the string is clean.
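+ // For example, the input "field-1=foo field-2=bar" yields
+ // {"field-1": "foo", "field-2": "bar"}; entries without an "=" sign are skipped
+ // (see TestFieldsNamesSet in logs_test.go).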
+ value = strings.Trim(value, "\"") + + fields := strings.Fields(value) + + for _, field := range fields { + n := strings.SplitN(field, "=", 2) + if len(n) == 2 { + (*f)[n[0]] = n[1] + } + } + + return nil +} + +// SetValue sets the FieldNames map with val +func (f *FieldNames) SetValue(val interface{}) { + *f = val.(FieldNames) +} + +// FieldHeaderNames holds maps of fields with specific mode +type FieldHeaderNames map[string]string + +// String is the method to format the flag's value, part of the flag.Value interface. +// The String method's output will be used in diagnostics. +func (f *FieldHeaderNames) String() string { + return fmt.Sprintf("%+v", *f) +} + +// Get return the FieldHeaderNames map +func (f *FieldHeaderNames) Get() interface{} { + return *f +} + +// Set is the method to set the flag value, part of the flag.Value interface. +// Set's argument is a string to be parsed to set the flag. +// It's a space-separated list, so we split it. +func (f *FieldHeaderNames) Set(value string) error { + // When arguments are passed through YAML, escaped double quotes + // might be added to this string, and they would break the last + // key/value pair. This ensures the string is clean. + value = strings.Trim(value, "\"") + + fields := strings.Fields(value) + + for _, field := range fields { + n := strings.SplitN(field, "=", 2) + (*f)[n[0]] = n[1] + } + + return nil +} + +// SetValue sets the FieldHeaderNames map with val +func (f *FieldHeaderNames) SetValue(val interface{}) { + *f = val.(FieldHeaderNames) +} + +// AccessLogFields holds configuration for access log fields +type AccessLogFields struct { + DefaultMode string `json:"defaultMode,omitempty" description:"Default mode for fields: keep | drop" export:"true"` + Names FieldNames `json:"names,omitempty" description:"Override mode for fields" export:"true"` + Headers *FieldHeaders `json:"headers,omitempty" description:"Headers to keep, drop or redact" export:"true"` +} + +// Keep check if the field need to be kept or dropped +func (f *AccessLogFields) Keep(field string) bool { + defaultKeep := true + if f != nil { + defaultKeep = checkFieldValue(f.DefaultMode, defaultKeep) + + if v, ok := f.Names[field]; ok { + return checkFieldValue(v, defaultKeep) + } + } + return defaultKeep +} + +// KeepHeader checks if the headers need to be kept, dropped or redacted and returns the status +func (f *AccessLogFields) KeepHeader(header string) string { + defaultValue := AccessLogKeep + if f != nil && f.Headers != nil { + defaultValue = checkFieldHeaderValue(f.Headers.DefaultMode, defaultValue) + + if v, ok := f.Headers.Names[header]; ok { + return checkFieldHeaderValue(v, defaultValue) + } + } + return defaultValue +} + +func checkFieldValue(value string, defaultKeep bool) bool { + switch value { + case AccessLogKeep: + return true + case AccessLogDrop: + return false + default: + return defaultKeep + } +} + +func checkFieldHeaderValue(value string, defaultValue string) string { + if value == AccessLogKeep || value == AccessLogDrop || value == AccessLogRedact { + return value + } + return defaultValue +} diff --git a/old/types/logs_test.go b/old/types/logs_test.go new file mode 100644 index 000000000..0b1bf8ebc --- /dev/null +++ b/old/types/logs_test.go @@ -0,0 +1,419 @@ +package types + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestStatusCodesSet(t *testing.T) { + testCases := []struct { + desc string + value string + expected StatusCodes + }{ + { + desc: "One value should return StatusCodes of size 1", + value: 
"200", + expected: StatusCodes{"200"}, + }, + { + desc: "Two values separated by comma should return StatusCodes of size 2", + value: "200,400", + expected: StatusCodes{"200", "400"}, + }, + { + desc: "Two values separated by semicolon should return StatusCodes of size 2", + value: "200;400", + expected: StatusCodes{"200", "400"}, + }, + { + desc: "Three values separated by comma and semicolon should return StatusCodes of size 3", + value: "200,400;500", + expected: StatusCodes{"200", "400", "500"}, + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + var statusCodes StatusCodes + err := statusCodes.Set(test.value) + assert.Nil(t, err) + assert.Equal(t, test.expected, statusCodes) + }) + } +} + +func TestStatusCodesGet(t *testing.T) { + testCases := []struct { + desc string + values StatusCodes + expected StatusCodes + }{ + { + desc: "Should return 1 value", + values: StatusCodes{"200"}, + expected: StatusCodes{"200"}, + }, + { + desc: "Should return 2 values", + values: StatusCodes{"200", "400"}, + expected: StatusCodes{"200", "400"}, + }, + { + desc: "Should return 3 values", + values: StatusCodes{"200", "400", "500"}, + expected: StatusCodes{"200", "400", "500"}, + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + actual := test.values.Get() + assert.Equal(t, test.expected, actual) + }) + } +} + +func TestStatusCodesString(t *testing.T) { + testCases := []struct { + desc string + values StatusCodes + expected string + }{ + { + desc: "Should return 1 value", + values: StatusCodes{"200"}, + expected: "[200]", + }, + { + desc: "Should return 2 values", + values: StatusCodes{"200", "400"}, + expected: "[200 400]", + }, + { + desc: "Should return 3 values", + values: StatusCodes{"200", "400", "500"}, + expected: "[200 400 500]", + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + actual := test.values.String() + assert.Equal(t, test.expected, actual) + }) + } +} + +func TestStatusCodesSetValue(t *testing.T) { + testCases := []struct { + desc string + values StatusCodes + expected StatusCodes + }{ + { + desc: "Should return 1 value", + values: StatusCodes{"200"}, + expected: StatusCodes{"200"}, + }, + { + desc: "Should return 2 values", + values: StatusCodes{"200", "400"}, + expected: StatusCodes{"200", "400"}, + }, + { + desc: "Should return 3 values", + values: StatusCodes{"200", "400", "500"}, + expected: StatusCodes{"200", "400", "500"}, + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + var slice StatusCodes + slice.SetValue(test.values) + assert.Equal(t, test.expected, slice) + }) + } +} + +func TestFieldsNamesSet(t *testing.T) { + testCases := []struct { + desc string + value string + expected *FieldNames + }{ + { + desc: "One value should return FieldNames of size 1", + value: "field-1=foo", + expected: &FieldNames{ + "field-1": "foo", + }, + }, + { + desc: "Two values separated by space should return FieldNames of size 2", + value: "field-1=foo field-2=bar", + expected: &FieldNames{ + "field-1": "foo", + "field-2": "bar", + }, + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + fieldsNames := &FieldNames{} + err := fieldsNames.Set(test.value) + assert.NoError(t, err) + + assert.Equal(t, test.expected, fieldsNames) + }) + } +} + +func 
TestFieldsNamesGet(t *testing.T) { + testCases := []struct { + desc string + values FieldNames + expected FieldNames + }{ + { + desc: "Should return 1 value", + values: FieldNames{"field-1": "foo"}, + expected: FieldNames{"field-1": "foo"}, + }, + { + desc: "Should return 2 values", + values: FieldNames{"field-1": "foo", "field-2": "bar"}, + expected: FieldNames{"field-1": "foo", "field-2": "bar"}, + }, + { + desc: "Should return 3 values", + values: FieldNames{"field-1": "foo", "field-2": "bar", "field-3": "powpow"}, + expected: FieldNames{"field-1": "foo", "field-2": "bar", "field-3": "powpow"}, + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + actual := test.values.Get() + assert.Equal(t, test.expected, actual) + }) + } +} + +func TestFieldsNamesString(t *testing.T) { + testCases := []struct { + desc string + values FieldNames + expected string + }{ + { + desc: "Should return 1 value", + values: FieldNames{"field-1": "foo"}, + expected: "map[field-1:foo]", + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + actual := test.values.String() + assert.Equal(t, test.expected, actual) + }) + } +} + +func TestFieldsNamesSetValue(t *testing.T) { + testCases := []struct { + desc string + values FieldNames + expected *FieldNames + }{ + { + desc: "Should return 1 value", + values: FieldNames{"field-1": "foo"}, + expected: &FieldNames{"field-1": "foo"}, + }, + { + desc: "Should return 2 values", + values: FieldNames{"field-1": "foo", "field-2": "bar"}, + expected: &FieldNames{"field-1": "foo", "field-2": "bar"}, + }, + { + desc: "Should return 3 values", + values: FieldNames{"field-1": "foo", "field-2": "bar", "field-3": "powpow"}, + expected: &FieldNames{"field-1": "foo", "field-2": "bar", "field-3": "powpow"}, + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + fieldsNames := &FieldNames{} + fieldsNames.SetValue(test.values) + assert.Equal(t, test.expected, fieldsNames) + }) + } +} + +func TestFieldsHeadersNamesSet(t *testing.T) { + testCases := []struct { + desc string + value string + expected *FieldHeaderNames + }{ + { + desc: "One value should return FieldNames of size 1", + value: "X-HEADER-1=foo", + expected: &FieldHeaderNames{ + "X-HEADER-1": "foo", + }, + }, + { + desc: "Two values separated by space should return FieldNames of size 2", + value: "X-HEADER-1=foo X-HEADER-2=bar", + expected: &FieldHeaderNames{ + "X-HEADER-1": "foo", + "X-HEADER-2": "bar", + }, + }, + { + desc: "Two values separated by space with escaped double quotes should return FieldNames of size 2", + value: "\"X-HEADER-1=foo X-HEADER-2=bar\"", + expected: &FieldHeaderNames{ + "X-HEADER-1": "foo", + "X-HEADER-2": "bar", + }, + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + headersNames := &FieldHeaderNames{} + err := headersNames.Set(test.value) + assert.NoError(t, err) + + assert.Equal(t, test.expected, headersNames) + }) + } +} + +func TestFieldsHeadersNamesGet(t *testing.T) { + testCases := []struct { + desc string + values FieldHeaderNames + expected FieldHeaderNames + }{ + { + desc: "Should return 1 value", + values: FieldHeaderNames{"X-HEADER-1": "foo"}, + expected: FieldHeaderNames{"X-HEADER-1": "foo"}, + }, + { + desc: "Should return 2 values", + values: FieldHeaderNames{"X-HEADER-1": "foo", "X-HEADER-2": "bar"}, + expected: 
FieldHeaderNames{"X-HEADER-1": "foo", "X-HEADER-2": "bar"}, + }, + { + desc: "Should return 3 values", + values: FieldHeaderNames{"X-HEADER-1": "foo", "X-HEADER-2": "bar", "X-HEADER-3": "powpow"}, + expected: FieldHeaderNames{"X-HEADER-1": "foo", "X-HEADER-2": "bar", "X-HEADER-3": "powpow"}, + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + actual := test.values.Get() + assert.Equal(t, test.expected, actual) + }) + } +} + +func TestFieldsHeadersNamesString(t *testing.T) { + testCases := []struct { + desc string + values FieldHeaderNames + expected string + }{ + { + desc: "Should return 1 value", + values: FieldHeaderNames{"X-HEADER-1": "foo"}, + expected: "map[X-HEADER-1:foo]", + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + actual := test.values.String() + assert.Equal(t, test.expected, actual) + }) + } +} + +func TestFieldsHeadersNamesSetValue(t *testing.T) { + testCases := []struct { + desc string + values FieldHeaderNames + expected *FieldHeaderNames + }{ + { + desc: "Should return 1 value", + values: FieldHeaderNames{"X-HEADER-1": "foo"}, + expected: &FieldHeaderNames{"X-HEADER-1": "foo"}, + }, + { + desc: "Should return 2 values", + values: FieldHeaderNames{"X-HEADER-1": "foo", "X-HEADER-2": "bar"}, + expected: &FieldHeaderNames{"X-HEADER-1": "foo", "X-HEADER-2": "bar"}, + }, + { + desc: "Should return 3 values", + values: FieldHeaderNames{"X-HEADER-1": "foo", "X-HEADER-2": "bar", "X-HEADER-3": "powpow"}, + expected: &FieldHeaderNames{"X-HEADER-1": "foo", "X-HEADER-2": "bar", "X-HEADER-3": "powpow"}, + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + headersNames := &FieldHeaderNames{} + headersNames.SetValue(test.values) + assert.Equal(t, test.expected, headersNames) + }) + } +} diff --git a/types/types.go b/old/types/types.go similarity index 99% rename from types/types.go rename to old/types/types.go index b79bb1e93..2553730c2 100644 --- a/types/types.go +++ b/old/types/types.go @@ -15,7 +15,7 @@ import ( "github.com/containous/flaeg/parse" "github.com/containous/mux" "github.com/containous/traefik/ip" - "github.com/containous/traefik/log" + "github.com/containous/traefik/old/log" traefiktls "github.com/containous/traefik/tls" "github.com/mitchellh/hashstructure" "github.com/ryanuber/go-glob" diff --git a/types/types_test.go b/old/types/types_test.go similarity index 100% rename from types/types_test.go rename to old/types/types_test.go diff --git a/ping/ping.go b/ping/ping.go index 1e7ffa860..c36a0446a 100644 --- a/ping/ping.go +++ b/ping/ping.go @@ -8,9 +8,10 @@ import ( "github.com/containous/mux" ) -// Handler expose ping routes +// Handler expose ping routes. type Handler struct { - EntryPoint string `description:"Ping entryPoint" export:"true"` + EntryPoint string `description:"Ping entryPoint" export:"true"` + Middlewares []string `description:"Middleware list" export:"true"` terminating bool } @@ -22,8 +23,8 @@ func (h *Handler) WithContext(ctx context.Context) { }() } -// AddRoutes add ping routes on a router -func (h *Handler) AddRoutes(router *mux.Router) { +// Append adds ping routes on a router. +func (h *Handler) Append(router *mux.Router) { router.Methods(http.MethodGet, http.MethodHead).Path("/ping"). 
HandlerFunc(func(response http.ResponseWriter, request *http.Request) { statusCode := http.StatusOK diff --git a/provider/acme/account.go b/provider/acme/account.go index f63147e91..1f74811b5 100644 --- a/provider/acme/account.go +++ b/provider/acme/account.go @@ -1,6 +1,7 @@ package acme import ( + "context" "crypto" "crypto/rand" "crypto/rsa" @@ -24,8 +25,8 @@ const ( ) // NewAccount creates an account -func NewAccount(email string, keyTypeValue string) (*Account, error) { - keyType := GetKeyType(keyTypeValue) +func NewAccount(ctx context.Context, email string, keyTypeValue string) (*Account, error) { + keyType := GetKeyType(ctx, keyTypeValue) // Create a user. New accounts need an email and private key to start privateKey, err := rsa.GenerateKey(rand.Reader, 4096) @@ -52,16 +53,20 @@ func (a *Account) GetRegistration() *acme.RegistrationResource { // GetPrivateKey returns private key func (a *Account) GetPrivateKey() crypto.PrivateKey { - if privateKey, err := x509.ParsePKCS1PrivateKey(a.PrivateKey); err == nil { - return privateKey + privateKey, err := x509.ParsePKCS1PrivateKey(a.PrivateKey) + if err != nil { + log.WithoutContext().WithField(log.ProviderName, "acme"). + Errorf("Cannot unmarshal private key %+v: %v", a.PrivateKey, err) + return nil } - log.Errorf("Cannot unmarshal private key %+v", a.PrivateKey) - return nil + return privateKey } // GetKeyType used to determine which algo to used -func GetKeyType(value string) acme.KeyType { +func GetKeyType(ctx context.Context, value string) acme.KeyType { + logger := log.FromContext(ctx) + switch value { case "EC256": return acme.EC256 @@ -74,10 +79,10 @@ func GetKeyType(value string) acme.KeyType { case "RSA8192": return acme.RSA8192 case "": - log.Infof("The key type is empty. Use default key type %v.", acme.RSA4096) + logger.Infof("The key type is empty. Use default key type %v.", acme.RSA4096) return acme.RSA4096 default: - log.Infof("Unable to determine key type value %q. Use default key type %v.", value, acme.RSA4096) + logger.Infof("Unable to determine the key type value %q: falling back on %v.", value, acme.RSA4096) return acme.RSA4096 } } diff --git a/provider/acme/challenge_http.go b/provider/acme/challenge_http.go index de9cd2407..9a522a4f6 100644 --- a/provider/acme/challenge_http.go +++ b/provider/acme/challenge_http.go @@ -1,6 +1,7 @@ package acme import ( + "context" "net" "net/http" "time" @@ -18,23 +19,56 @@ type challengeHTTP struct { Store Store } -// Present presents a challenge to obtain new ACME certificate +// Present presents a challenge to obtain new ACME certificate. func (c *challengeHTTP) Present(domain, token, keyAuth string) error { return c.Store.SetHTTPChallengeToken(token, domain, []byte(keyAuth)) } -// CleanUp cleans the challenges when certificate is obtained +// CleanUp cleans the challenges when certificate is obtained. func (c *challengeHTTP) CleanUp(domain, token, keyAuth string) error { return c.Store.RemoveHTTPChallengeToken(token, domain) } -// Timeout calculates the maximum of time allowed to resolved an ACME challenge +// Timeout calculates the maximum of time allowed to resolved an ACME challenge. func (c *challengeHTTP) Timeout() (timeout, interval time.Duration) { return 60 * time.Second, 5 * time.Second } -func getTokenValue(token, domain string, store Store) []byte { - log.Debugf("Looking for an existing ACME challenge for token %v...", token) +// Append adds routes on internal router +func (p *Provider) Append(router *mux.Router) { + router.Methods(http.MethodGet). 
+ Path(acme.HTTP01ChallengePath("{token}")). + Handler(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + vars := mux.Vars(req) + + ctx := log.With(context.Background(), log.Str(log.ProviderName, "acme")) + logger := log.FromContext(ctx) + + if token, ok := vars["token"]; ok { + domain, _, err := net.SplitHostPort(req.Host) + if err != nil { + logger.Debugf("Unable to split host and port: %v. Fallback to request host.", err) + domain = req.Host + } + + tokenValue := getTokenValue(ctx, token, domain, p.Store) + if len(tokenValue) > 0 { + rw.WriteHeader(http.StatusOK) + _, err = rw.Write(tokenValue) + if err != nil { + logger.Errorf("Unable to write token: %v", err) + } + return + } + } + rw.WriteHeader(http.StatusNotFound) + })) +} + +func getTokenValue(ctx context.Context, token, domain string, store Store) []byte { + logger := log.FromContext(ctx) + logger.Debugf("Retrieving the ACME challenge for token %v...", token) + var result []byte operation := func() error { @@ -44,43 +78,16 @@ func getTokenValue(token, domain string, store Store) []byte { } notify := func(err error, time time.Duration) { - log.Errorf("Error getting challenge for token retrying in %s", time) + logger.Errorf("Error getting challenge for token retrying in %s", time) } ebo := backoff.NewExponentialBackOff() ebo.MaxElapsedTime = 60 * time.Second err := backoff.RetryNotify(safe.OperationWithRecover(operation), ebo, notify) if err != nil { - log.Errorf("Error getting challenge for token: %v", err) + logger.Errorf("Cannot retrieve the ACME challenge for token %v: %v", token, err) return []byte{} } return result } - -// AddRoutes add routes on internal router -func (p *Provider) AddRoutes(router *mux.Router) { - router.Methods(http.MethodGet). - Path(acme.HTTP01ChallengePath("{token}")). - Handler(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { - vars := mux.Vars(req) - if token, ok := vars["token"]; ok { - domain, _, err := net.SplitHostPort(req.Host) - if err != nil { - log.Debugf("Unable to split host and port: %v. Fallback to request host.", err) - domain = req.Host - } - - tokenValue := getTokenValue(token, domain, p.Store) - if len(tokenValue) > 0 { - rw.WriteHeader(http.StatusOK) - _, err = rw.Write(tokenValue) - if err != nil { - log.Errorf("Unable to write token : %v", err) - } - return - } - } - rw.WriteHeader(http.StatusNotFound) - })) -} diff --git a/provider/acme/challenge_tls.go b/provider/acme/challenge_tls.go index 39498cdb5..6b818b2e1 100644 --- a/provider/acme/challenge_tls.go +++ b/provider/acme/challenge_tls.go @@ -15,7 +15,8 @@ type challengeTLSALPN struct { } func (c *challengeTLSALPN) Present(domain, token, keyAuth string) error { - log.Debugf("TLS Challenge Present temp certificate for %s", domain) + log.WithoutContext().WithField(log.ProviderName, "acme"). + Debugf("TLS Challenge Present temp certificate for %s", domain) certPEMBlock, keyPEMBlock, err := acme.TLSALPNChallengeBlocks(domain, keyAuth) if err != nil { @@ -27,7 +28,8 @@ func (c *challengeTLSALPN) Present(domain, token, keyAuth string) error { } func (c *challengeTLSALPN) CleanUp(domain, token, keyAuth string) error { - log.Debugf("TLS Challenge CleanUp temp certificate for %s", domain) + log.WithoutContext().WithField(log.ProviderName, "acme"). 
+ Debugf("TLS Challenge CleanUp temp certificate for %s", domain) return c.Store.RemoveTLSChallenge(domain) } diff --git a/provider/acme/local_store.go b/provider/acme/local_store.go index 97714e7eb..03127cb08 100644 --- a/provider/acme/local_store.go +++ b/provider/acme/local_store.go @@ -42,6 +42,8 @@ func (s *LocalStore) get() (*StoredData, error) { } if hasData { + logger := log.WithoutContext().WithField(log.ProviderName, "acme") + f, err := os.Open(s.filename) if err != nil { return nil, err @@ -66,7 +68,7 @@ func (s *LocalStore) get() (*StoredData, error) { return nil, err } if isOldRegistration { - log.Debug("Reset ACME account.") + logger.Debug("Reseting ACME account.") s.storedData.Account = nil s.SaveDataChan <- s.storedData } @@ -76,7 +78,7 @@ func (s *LocalStore) get() (*StoredData, error) { var certificates []*Certificate for _, certificate := range s.storedData.Certificates { if len(certificate.Certificate) == 0 || len(certificate.Key) == 0 { - log.Debugf("Delete certificate %v for domains %v which have no value.", certificate, certificate.Domain.ToStrArray()) + logger.Debugf("Deleting empty certificate %v for %v", certificate, certificate.Domain.ToStrArray()) continue } certificates = append(certificates, certificate) @@ -95,15 +97,16 @@ func (s *LocalStore) get() (*StoredData, error) { // listenSaveAction listens to a chan to store ACME data in json format into LocalStore.filename func (s *LocalStore) listenSaveAction() { safe.Go(func() { + logger := log.WithoutContext().WithField(log.ProviderName, "acme") for object := range s.SaveDataChan { data, err := json.MarshalIndent(object, "", " ") if err != nil { - log.Error(err) + logger.Error(err) } err = ioutil.WriteFile(s.filename, data, 0600) if err != nil { - log.Error(err) + logger.Error(err) } } }) diff --git a/provider/acme/provider.go b/provider/acme/provider.go index f5f46fa49..8f39938e3 100644 --- a/provider/acme/provider.go +++ b/provider/acme/provider.go @@ -1,6 +1,7 @@ package acme import ( + "context" "crypto/tls" "crypto/x509" "fmt" @@ -15,6 +16,7 @@ import ( "github.com/cenk/backoff" "github.com/containous/flaeg/parse" + "github.com/containous/traefik/config" "github.com/containous/traefik/log" "github.com/containous/traefik/rules" "github.com/containous/traefik/safe" @@ -29,8 +31,8 @@ import ( ) var ( - // OSCPMustStaple enables OSCP stapling as from https://github.com/xenolf/lego/issues/270 - OSCPMustStaple = false + // oscpMustStaple enables OSCP stapling as from https://github.com/xenolf/lego/issues/270 + oscpMustStaple = false ) // Configuration holds ACME configuration provided by users @@ -49,23 +51,6 @@ type Configuration struct { Domains []types.Domain `description:"CN and SANs (alternative domains) to each main domain using format: --acme.domains='main.com,san1.com,san2.com' --acme.domains='*.main.net'. No SANs for wildcards domain. Wildcard domains only accepted with DNSChallenge"` } -// Provider holds configurations of the provider. 
-type Provider struct { - *Configuration - Store Store - certificates []*Certificate - account *Account - client *acme.Client - certsChan chan *Certificate - configurationChan chan<- types.ConfigMessage - certificateStore *traefiktls.CertificateStore - clientMutex sync.Mutex - configFromListenerChan chan types.Configuration - pool *safe.Pool - resolvingDomains map[string]struct{} - resolvingDomainsMutex sync.RWMutex -} - // Certificate is a struct which contains all data needed from an ACME certificate type Certificate struct { Domain types.Domain @@ -79,8 +64,9 @@ type DNSChallenge struct { DelayBeforeCheck parse.Duration `description:"Assume DNS propagates after a delay in seconds rather than finding and querying nameservers."` Resolvers types.DNSResolvers `description:"Use following DNS servers to resolve the FQDN authority."` DisablePropagationCheck bool `description:"Disable the DNS propagation checks before notifying ACME that the DNS challenge is ready. [not recommended]"` - preCheckTimeout time.Duration - preCheckInterval time.Duration + + preCheckTimeout time.Duration + preCheckInterval time.Duration } // HTTPChallenge contains HTTP challenge Configuration @@ -91,8 +77,25 @@ type HTTPChallenge struct { // TLSChallenge contains TLS challenge Configuration type TLSChallenge struct{} +// Provider holds configurations of the provider. +type Provider struct { + *Configuration + Store Store + certificates []*Certificate + account *Account + client *acme.Client + certsChan chan *Certificate + configurationChan chan<- config.Message + certificateStore *traefiktls.CertificateStore + clientMutex sync.Mutex + configFromListenerChan chan config.Configuration + pool *safe.Pool + resolvingDomains map[string]struct{} + resolvingDomainsMutex sync.RWMutex +} + // SetConfigListenerChan initializes the configFromListenerChan -func (p *Provider) SetConfigListenerChan(configFromListenerChan chan types.Configuration) { +func (p *Provider) SetConfigListenerChan(configFromListenerChan chan config.Configuration) { p.configFromListenerChan = configFromListenerChan } @@ -102,13 +105,15 @@ func (p *Provider) SetCertificateStore(certificateStore *traefiktls.CertificateS } // ListenConfiguration sets a new Configuration into the configFromListenerChan -func (p *Provider) ListenConfiguration(config types.Configuration) { +func (p *Provider) ListenConfiguration(config config.Configuration) { p.configFromListenerChan <- config } // ListenRequest resolves new certificates for a domain from an incoming request and return a valid Certificate to serve (onDemand option) func (p *Provider) ListenRequest(domain string) (*tls.Certificate, error) { - acmeCert, err := p.resolveCertificate(types.Domain{Main: domain}, false) + ctx := log.With(context.Background(), log.Str(log.ProviderName, "acme")) + + acmeCert, err := p.resolveCertificate(ctx, types.Domain{Main: domain}, false) if acmeCert == nil || err != nil { return nil, err } @@ -120,9 +125,13 @@ func (p *Provider) ListenRequest(domain string) (*tls.Certificate, error) { // Init for compatibility reason the BaseProvider implements an empty Init func (p *Provider) Init(_ types.Constraints) error { + ctx := log.With(context.Background(), log.Str(log.ProviderName, "acme")) + logger := log.FromContext(ctx) + acme.UserAgent = fmt.Sprintf("containous-traefik/%s", version.Version) + if p.ACMELogging { - legolog.Logger = fmtlog.New(log.WriterLevel(logrus.InfoLevel), "legolog: ", 0) + legolog.Logger = fmtlog.New(logger.WriterLevel(logrus.InfoLevel), "legolog: ", 0) } else { 
legolog.Logger = fmtlog.New(ioutil.Discard, "", 0) } @@ -138,8 +147,8 @@ func (p *Provider) Init(_ types.Constraints) error { } // Reset Account if caServer changed, thus registration URI can be updated - if p.account != nil && p.account.Registration != nil && !isAccountMatchingCaServer(p.account.Registration.URI, p.CAServer) { - log.Info("Account URI does not match the current CAServer. The account will be reset") + if p.account != nil && p.account.Registration != nil && !isAccountMatchingCaServer(ctx, p.account.Registration.URI, p.CAServer) { + logger.Info("Account URI does not match the current CAServer. The account will be reset.") p.account = nil } @@ -154,49 +163,56 @@ func (p *Provider) Init(_ types.Constraints) error { return nil } -func isAccountMatchingCaServer(accountURI string, serverURI string) bool { +func isAccountMatchingCaServer(ctx context.Context, accountURI string, serverURI string) bool { + logger := log.FromContext(ctx) + aru, err := url.Parse(accountURI) if err != nil { - log.Infof("Unable to parse account.Registration URL : %v", err) + logger.Infof("Unable to parse account.Registration URL: %v", err) return false } + cau, err := url.Parse(serverURI) if err != nil { - log.Infof("Unable to parse CAServer URL : %v", err) + logger.Infof("Unable to parse CAServer URL: %v", err) return false } + return cau.Hostname() == aru.Hostname() } // Provide allows the file provider to provide configurations to traefik // using the given Configuration channel. -func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error { +func (p *Provider) Provide(configurationChan chan<- config.Message, pool *safe.Pool) error { + ctx := log.With(context.Background(), log.Str(log.ProviderName, "acme")) + p.pool = pool - p.watchCertificate() - p.watchNewDomains() + p.watchCertificate(ctx) + p.watchNewDomains(ctx) p.configurationChan = configurationChan p.refreshCertificates() - p.deleteUnnecessaryDomains() + p.deleteUnnecessaryDomains(ctx) for i := 0; i < len(p.Domains); i++ { domain := p.Domains[i] safe.Go(func() { - if _, err := p.resolveCertificate(domain, true); err != nil { - log.Errorf("Unable to obtain ACME certificate for domains %q : %v", strings.Join(domain.ToStrArray(), ","), err) + if _, err := p.resolveCertificate(ctx, domain, true); err != nil { + log.WithoutContext().WithField(log.ProviderName, "acme"). 
+ Errorf("Unable to obtain ACME certificate for domains %q : %v", strings.Join(domain.ToStrArray(), ","), err) } }) } - p.renewCertificates() + p.renewCertificates(ctx) ticker := time.NewTicker(24 * time.Hour) pool.Go(func(stop chan bool) { for { select { case <-ticker.C: - p.renewCertificates() + p.renewCertificates(ctx) case <-stop: ticker.Stop() return @@ -211,22 +227,25 @@ func (p *Provider) getClient() (*acme.Client, error) { p.clientMutex.Lock() defer p.clientMutex.Unlock() + ctx := log.With(context.Background(), log.Str(log.ProviderName, "acme")) + logger := log.FromContext(ctx) + if p.client != nil { return p.client, nil } - account, err := p.initAccount() + account, err := p.initAccount(ctx) if err != nil { return nil, err } - log.Debug("Building ACME client...") + logger.Debug("Building ACME client...") caServer := "https://acme-v02.api.letsencrypt.org/directory" if len(p.CAServer) > 0 { caServer = p.CAServer } - log.Debug(caServer) + logger.Debug(caServer) client, err := acme.NewClient(caServer, account, account.KeyType) if err != nil { @@ -235,11 +254,11 @@ func (p *Provider) getClient() (*acme.Client, error) { // New users will need to register; be sure to save it if account.GetRegistration() == nil { - log.Info("Register...") + logger.Info("Register...") - reg, err := client.Register(true) - if err != nil { - return nil, err + reg, errR := client.Register(true) + if errR != nil { + return nil, errR } account.Registration = reg @@ -253,12 +272,12 @@ func (p *Provider) getClient() (*acme.Client, error) { } if p.DNSChallenge != nil && len(p.DNSChallenge.Provider) > 0 { - log.Debugf("Using DNS Challenge provider: %s", p.DNSChallenge.Provider) + logger.Debugf("Using DNS Challenge provider: %s", p.DNSChallenge.Provider) SetRecursiveNameServers(p.DNSChallenge.Resolvers) SetPropagationCheck(p.DNSChallenge.DisablePropagationCheck) - err = dnsOverrideDelay(p.DNSChallenge.DelayBeforeCheck) + err = dnsOverrideDelay(ctx, p.DNSChallenge.DelayBeforeCheck) if err != nil { return nil, err } @@ -286,7 +305,7 @@ func (p *Provider) getClient() (*acme.Client, error) { } } else if p.HTTPChallenge != nil && len(p.HTTPChallenge.EntryPoint) > 0 { - log.Debug("Using HTTP Challenge provider.") + logger.Debug("Using HTTP Challenge provider.") client.ExcludeChallenges([]acme.Challenge{acme.DNS01, acme.TLSALPN01}) @@ -295,7 +314,7 @@ func (p *Provider) getClient() (*acme.Client, error) { return nil, err } } else if p.TLSChallenge != nil { - log.Debug("Using TLS Challenge provider.") + logger.Debug("Using TLS Challenge provider.") client.ExcludeChallenges([]acme.Challenge{acme.HTTP01, acme.DNS01}) @@ -311,10 +330,10 @@ func (p *Provider) getClient() (*acme.Client, error) { return p.client, nil } -func (p *Provider) initAccount() (*Account, error) { +func (p *Provider) initAccount(ctx context.Context) (*Account, error) { if p.account == nil || len(p.account.Email) == 0 { var err error - p.account, err = NewAccount(p.Email, p.KeyType) + p.account, err = NewAccount(ctx, p.Email, p.KeyType) if err != nil { return nil, err } @@ -322,59 +341,49 @@ func (p *Provider) initAccount() (*Account, error) { // Set the KeyType if not already defined in the account if len(p.account.KeyType) == 0 { - p.account.KeyType = GetKeyType(p.KeyType) + p.account.KeyType = GetKeyType(ctx, p.KeyType) } return p.account, nil } -func contains(entryPoints []string, acmeEntryPoint string) bool { - for _, entryPoint := range entryPoints { - if entryPoint == acmeEntryPoint { - return true - } - } - return false -} - -func (p *Provider) 
watchNewDomains() { +func (p *Provider) watchNewDomains(ctx context.Context) { p.pool.Go(func(stop chan bool) { for { select { case config := <-p.configFromListenerChan: - for _, frontend := range config.Frontends { - if !contains(frontend.EntryPoints, p.EntryPoint) { + for routerName, route := range config.Routers { + logger := log.FromContext(ctx).WithField(log.RouterName, routerName) + + // FIXME use new rule system + domainRules := rules.Rules{} + domains, err := domainRules.ParseDomains(route.Rule) + if err != nil { + logger.Errorf("Error parsing domains in provider ACME: %v", err) continue } - for _, route := range frontend.Routes { - domainRules := rules.Rules{} - domains, err := domainRules.ParseDomains(route.Rule) - if err != nil { - log.Errorf("Error parsing domains in provider ACME: %v", err) - continue - } - if len(domains) == 0 { - log.Debugf("No domain parsed in rule %q in provider ACME", route.Rule) - continue - } - - log.Debugf("Try to challenge certificate for domain %v founded in Host rule", domains) - - var domain types.Domain - if len(domains) > 0 { - domain = types.Domain{Main: domains[0]} - if len(domains) > 1 { - domain.SANs = domains[1:] - } - - safe.Go(func() { - if _, err := p.resolveCertificate(domain, false); err != nil { - log.Errorf("Unable to obtain ACME certificate for domains %q detected thanks to rule %q : %v", strings.Join(domains, ","), route.Rule, err) - } - }) - } + if len(domains) == 0 { + logger.Debugf("No domain parsed in rule %q in provider ACME", route.Rule) + continue } + + logger.Debugf("Try to challenge certificate for domain %v founded in Host rule", domains) + + var domain types.Domain + if len(domains) > 0 { + domain = types.Domain{Main: domains[0]} + if len(domains) > 1 { + domain.SANs = domains[1:] + } + + safe.Go(func() { + if _, err := p.resolveCertificate(ctx, domain, false); err != nil { + logger.Errorf("Unable to obtain ACME certificate for domains %q detected thanks to rule %q : %v", strings.Join(domains, ","), route.Rule, err) + } + }) + } + } case <-stop: return @@ -383,14 +392,14 @@ func (p *Provider) watchNewDomains() { }) } -func (p *Provider) resolveCertificate(domain types.Domain, domainFromConfigurationFile bool) (*acme.CertificateResource, error) { - domains, err := p.getValidDomains(domain, domainFromConfigurationFile) +func (p *Provider) resolveCertificate(ctx context.Context, domain types.Domain, domainFromConfigurationFile bool) (*acme.CertificateResource, error) { + domains, err := p.getValidDomains(ctx, domain, domainFromConfigurationFile) if err != nil { return nil, err } // Check provided certificates - uncheckedDomains := p.getUncheckedDomains(domains, !domainFromConfigurationFile) + uncheckedDomains := p.getUncheckedDomains(ctx, domains, !domainFromConfigurationFile) if len(uncheckedDomains) == 0 { return nil, nil } @@ -398,7 +407,8 @@ func (p *Provider) resolveCertificate(domain types.Domain, domainFromConfigurati p.addResolvingDomains(uncheckedDomains) defer p.removeResolvingDomains(uncheckedDomains) - log.Debugf("Loading ACME certificates %+v...", uncheckedDomains) + logger := log.FromContext(ctx) + logger.Debugf("Loading ACME certificates %+v...", uncheckedDomains) client, err := p.getClient() if err != nil { @@ -408,9 +418,9 @@ func (p *Provider) resolveCertificate(domain types.Domain, domainFromConfigurati var certificate *acme.CertificateResource bundle := true if p.useCertificateWithRetry(uncheckedDomains) { - certificate, err = obtainCertificateWithRetry(domains, client, p.DNSChallenge.preCheckTimeout, 
p.DNSChallenge.preCheckInterval, bundle) + certificate, err = obtainCertificateWithRetry(ctx, domains, client, p.DNSChallenge.preCheckTimeout, p.DNSChallenge.preCheckInterval, bundle) } else { - certificate, err = client.ObtainCertificate(domains, bundle, nil, OSCPMustStaple) + certificate, err = client.ObtainCertificate(domains, bundle, nil, oscpMustStaple) } if err != nil { @@ -423,7 +433,7 @@ func (p *Provider) resolveCertificate(domain types.Domain, domainFromConfigurati return nil, fmt.Errorf("domains %v generate certificate with no value: %v", uncheckedDomains, certificate) } - log.Debugf("Certificates obtained for domains %+v", uncheckedDomains) + logger.Debugf("Certificates obtained for domains %+v", uncheckedDomains) if len(uncheckedDomains) > 1 { domain = types.Domain{Main: uncheckedDomains[0], SANs: uncheckedDomains[1:]} @@ -479,17 +489,19 @@ func (p *Provider) useCertificateWithRetry(domains []string) bool { return false } -func obtainCertificateWithRetry(domains []string, client *acme.Client, timeout, interval time.Duration, bundle bool) (*acme.CertificateResource, error) { +func obtainCertificateWithRetry(ctx context.Context, domains []string, client *acme.Client, timeout, interval time.Duration, bundle bool) (*acme.CertificateResource, error) { + logger := log.FromContext(ctx) + var certificate *acme.CertificateResource var err error operation := func() error { - certificate, err = client.ObtainCertificate(domains, bundle, nil, OSCPMustStaple) + certificate, err = client.ObtainCertificate(domains, bundle, nil, oscpMustStaple) return err } notify := func(err error, time time.Duration) { - log.Errorf("Error obtaining certificate retrying in %s", time) + logger.Errorf("Error obtaining certificate retrying in %s", time) } // Define a retry backOff to let LEGO tries twice to obtain a certificate for both wildcard and root domain @@ -500,20 +512,20 @@ func obtainCertificateWithRetry(domains []string, client *acme.Client, timeout, err = backoff.RetryNotify(safe.OperationWithRecover(operation), rbo, notify) if err != nil { - log.Errorf("Error obtaining certificate: %v", err) + logger.Errorf("Error obtaining certificate: %v", err) return nil, err } return certificate, nil } -func dnsOverrideDelay(delay parse.Duration) error { +func dnsOverrideDelay(ctx context.Context, delay parse.Duration) error { if delay == 0 { return nil } if delay > 0 { - log.Debugf("Delaying %d rather than validating DNS propagation now.", delay) + log.FromContext(ctx).Debugf("Delaying %d rather than validating DNS propagation now.", delay) acme.PreCheckDNS = func(_, _ string) (bool, error) { time.Sleep(time.Duration(delay)) @@ -532,9 +544,11 @@ func (p *Provider) addCertificateForDomain(domain types.Domain, certificate []by // deleteUnnecessaryDomains deletes from the configuration : // - Duplicated domains // - Domains which are checked by wildcard domain -func (p *Provider) deleteUnnecessaryDomains() { +func (p *Provider) deleteUnnecessaryDomains(ctx context.Context) { var newDomains []types.Domain + logger := log.FromContext(ctx) + for idxDomainToCheck, domainToCheck := range p.Domains { keepDomain := true @@ -545,7 +559,7 @@ func (p *Provider) deleteUnnecessaryDomains() { if reflect.DeepEqual(domain, domainToCheck) { if idxDomainToCheck > idxDomain { - log.Warnf("The domain %v is duplicated in the configuration but will be process by ACME provider only once.", domainToCheck) + logger.Warnf("The domain %v is duplicated in the configuration but will be process by ACME provider only once.", domainToCheck) 
keepDomain = false } break @@ -557,11 +571,11 @@ func (p *Provider) deleteUnnecessaryDomains() { for _, domainProcessed := range domainToCheck.ToStrArray() { if idxDomain < idxDomainToCheck && isDomainAlreadyChecked(domainProcessed, domain.ToStrArray()) { // The domain is duplicated in a CN - log.Warnf("Domain %q is duplicated in the configuration or validated by the domain %v. It will be processed once.", domainProcessed, domain) + logger.Warnf("Domain %q is duplicated in the configuration or validated by the domain %v. It will be processed once.", domainProcessed, domain) continue } else if domain.Main != domainProcessed && strings.HasPrefix(domain.Main, "*") && isDomainAlreadyChecked(domainProcessed, []string{domain.Main}) { // Check if a wildcard can validate the domain - log.Warnf("Domain %q will not be processed by ACME provider because it is validated by the wildcard %q", domainProcessed, domain.Main) + logger.Warnf("Domain %q will not be processed by ACME provider because it is validated by the wildcard %q", domainProcessed, domain.Main) continue } newDomainsToCheck = append(newDomainsToCheck, domainProcessed) @@ -584,8 +598,9 @@ func (p *Provider) deleteUnnecessaryDomains() { p.Domains = newDomains } -func (p *Provider) watchCertificate() { +func (p *Provider) watchCertificate(ctx context.Context) { p.certsChan = make(chan *Certificate) + p.pool.Go(func(stop chan bool) { for { select { @@ -605,9 +620,8 @@ func (p *Provider) watchCertificate() { err := p.saveCertificates() if err != nil { - log.Error(err) + log.FromContext(ctx).Error(err) } - case <-stop: return } @@ -624,50 +638,53 @@ func (p *Provider) saveCertificates() error { } func (p *Provider) refreshCertificates() { - config := types.ConfigMessage{ + conf := config.Message{ ProviderName: "ACME", - Configuration: &types.Configuration{ - Backends: map[string]*types.Backend{}, - Frontends: map[string]*types.Frontend{}, - TLS: []*traefiktls.Configuration{}, + Configuration: &config.Configuration{ + Routers: map[string]*config.Router{}, + Middlewares: map[string]*config.Middleware{}, + Services: map[string]*config.Service{}, + TLS: []*traefiktls.Configuration{}, }, } for _, cert := range p.certificates { certificate := &traefiktls.Certificate{CertFile: traefiktls.FileOrContent(cert.Certificate), KeyFile: traefiktls.FileOrContent(cert.Key)} - config.Configuration.TLS = append(config.Configuration.TLS, &traefiktls.Configuration{Certificate: certificate, EntryPoints: []string{p.EntryPoint}}) + conf.Configuration.TLS = append(conf.Configuration.TLS, &traefiktls.Configuration{Certificate: certificate, EntryPoints: []string{p.EntryPoint}}) } - p.configurationChan <- config + p.configurationChan <- conf } -func (p *Provider) renewCertificates() { - log.Info("Testing certificate renew...") +func (p *Provider) renewCertificates(ctx context.Context) { + logger := log.FromContext(ctx) + + logger.Info("Testing certificate renew...") for _, certificate := range p.certificates { - crt, err := getX509Certificate(certificate) + crt, err := getX509Certificate(ctx, certificate) // If there's an error, we assume the cert is broken, and needs update // <= 30 days left, renew certificate if err != nil || crt == nil || crt.NotAfter.Before(time.Now().Add(24*30*time.Hour)) { client, err := p.getClient() if err != nil { - log.Infof("Error renewing certificate from LE : %+v, %v", certificate.Domain, err) + logger.Infof("Error renewing certificate from LE : %+v, %v", certificate.Domain, err) continue } - log.Infof("Renewing certificate from LE : %+v", 
certificate.Domain) + logger.Infof("Renewing certificate from LE : %+v", certificate.Domain) renewedCert, err := client.RenewCertificate(acme.CertificateResource{ Domain: certificate.Domain.Main, PrivateKey: certificate.Key, Certificate: certificate.Certificate, - }, true, OSCPMustStaple) + }, true, oscpMustStaple) if err != nil { - log.Errorf("Error renewing certificate from LE: %v, %v", certificate.Domain, err) + logger.Errorf("Error renewing certificate from LE: %v, %v", certificate.Domain, err) continue } if len(renewedCert.Certificate) == 0 || len(renewedCert.PrivateKey) == 0 { - log.Errorf("domains %v renew certificate with no value: %v", certificate.Domain.ToStrArray(), certificate) + logger.Errorf("domains %v renew certificate with no value: %v", certificate.Domain.ToStrArray(), certificate) continue } @@ -678,11 +695,11 @@ func (p *Provider) renewCertificates() { // Get provided certificate which check a domains list (Main and SANs) // from static and dynamic provided certificates -func (p *Provider) getUncheckedDomains(domainsToCheck []string, checkConfigurationDomains bool) []string { +func (p *Provider) getUncheckedDomains(ctx context.Context, domainsToCheck []string, checkConfigurationDomains bool) []string { p.resolvingDomainsMutex.RLock() defer p.resolvingDomainsMutex.RUnlock() - log.Debugf("Looking for provided certificate(s) to validate %q...", domainsToCheck) + log.FromContext(ctx).Debugf("Looking for provided certificate(s) to validate %q...", domainsToCheck) allDomains := p.certificateStore.GetAllDomains() @@ -703,10 +720,10 @@ func (p *Provider) getUncheckedDomains(domainsToCheck []string, checkConfigurati } } - return searchUncheckedDomains(domainsToCheck, allDomains) + return searchUncheckedDomains(ctx, domainsToCheck, allDomains) } -func searchUncheckedDomains(domainsToCheck []string, existentDomains []string) []string { +func searchUncheckedDomains(ctx context.Context, domainsToCheck []string, existentDomains []string) []string { var uncheckedDomains []string for _, domainToCheck := range domainsToCheck { if !isDomainAlreadyChecked(domainToCheck, existentDomains) { @@ -714,18 +731,21 @@ func searchUncheckedDomains(domainsToCheck []string, existentDomains []string) [ } } + logger := log.FromContext(ctx) if len(uncheckedDomains) == 0 { - log.Debugf("No ACME certificate generation required for domains %q.", domainsToCheck) + logger.Debugf("No ACME certificate generation required for domains %q.", domainsToCheck) } else { - log.Debugf("Domains %q need ACME certificates generation for domains %q.", domainsToCheck, strings.Join(uncheckedDomains, ",")) + logger.Debugf("Domains %q need ACME certificates generation for domains %q.", domainsToCheck, strings.Join(uncheckedDomains, ",")) } return uncheckedDomains } -func getX509Certificate(certificate *Certificate) (*x509.Certificate, error) { +func getX509Certificate(ctx context.Context, certificate *Certificate) (*x509.Certificate, error) { + logger := log.FromContext(ctx) + tlsCert, err := tls.X509KeyPair(certificate.Certificate, certificate.Key) if err != nil { - log.Errorf("Failed to load TLS keypair from ACME certificate for domain %q (SAN : %q), certificate will be renewed : %v", certificate.Domain.Main, strings.Join(certificate.Domain.SANs, ","), err) + logger.Errorf("Failed to load TLS key pair from ACME certificate for domain %q (SAN : %q), certificate will be renewed : %v", certificate.Domain.Main, strings.Join(certificate.Domain.SANs, ","), err) return nil, err } @@ -733,7 +753,7 @@ func 
getX509Certificate(certificate *Certificate) (*x509.Certificate, error) { if crt == nil { crt, err = x509.ParseCertificate(tlsCert.Certificate[0]) if err != nil { - log.Errorf("Failed to parse TLS keypair from ACME certificate for domain %q (SAN : %q), certificate will be renewed : %v", certificate.Domain.Main, strings.Join(certificate.Domain.SANs, ","), err) + logger.Errorf("Failed to parse TLS key pair from ACME certificate for domain %q (SAN : %q), certificate will be renewed : %v", certificate.Domain.Main, strings.Join(certificate.Domain.SANs, ","), err) } } @@ -741,7 +761,7 @@ func getX509Certificate(certificate *Certificate) (*x509.Certificate, error) { } // getValidDomains checks if given domain is allowed to generate a ACME certificate and return it -func (p *Provider) getValidDomains(domain types.Domain, wildcardAllowed bool) ([]string, error) { +func (p *Provider) getValidDomains(ctx context.Context, domain types.Domain, wildcardAllowed bool) ([]string, error) { domains := domain.ToStrArray() if len(domains) == 0 { return nil, errors.New("unable to generate a certificate in ACME provider when no domain is given") @@ -772,7 +792,7 @@ func (p *Provider) getValidDomains(domain types.Domain, wildcardAllowed bool) ([ canonicalDomain := types.CanonicalDomain(domain) cleanDomain := acme.UnFqdn(canonicalDomain) if canonicalDomain != cleanDomain { - log.Warnf("FQDN detected, please remove the trailing dot: %s", canonicalDomain) + log.FromContext(ctx).Warnf("FQDN detected, please remove the trailing dot: %s", canonicalDomain) } cleanDomains = append(cleanDomains, cleanDomain) } diff --git a/provider/acme/provider_test.go b/provider/acme/provider_test.go index b5287ba4e..f77e90f58 100644 --- a/provider/acme/provider_test.go +++ b/provider/acme/provider_test.go @@ -1,6 +1,7 @@ package acme import ( + "context" "crypto/tls" "testing" @@ -195,7 +196,7 @@ func TestGetUncheckedCertificates(t *testing.T) { resolvingDomains: test.resolvingDomains, } - domains := acmeProvider.getUncheckedDomains(test.domains, false) + domains := acmeProvider.getUncheckedDomains(context.Background(), test.domains, false) assert.Equal(t, len(test.expectedDomains), len(domains), "Unexpected domains.") }) } @@ -283,7 +284,7 @@ func TestGetValidDomain(t *testing.T) { acmeProvider := Provider{Configuration: &Configuration{DNSChallenge: test.dnsChallenge}} - domains, err := acmeProvider.getValidDomains(test.domains, test.wildcardAllowed) + domains, err := acmeProvider.getValidDomains(context.Background(), test.domains, test.wildcardAllowed) if len(test.expectedErr) > 0 { assert.EqualError(t, err, test.expectedErr, "Unexpected error.") @@ -465,7 +466,7 @@ func TestDeleteUnnecessaryDomains(t *testing.T) { acmeProvider := Provider{Configuration: &Configuration{Domains: test.domains}} - acmeProvider.deleteUnnecessaryDomains() + acmeProvider.deleteUnnecessaryDomains(context.Background()) assert.Equal(t, test.expectedDomains, acmeProvider.Domains, "unexpected domain") }) } @@ -539,7 +540,7 @@ func TestIsAccountMatchingCaServer(t *testing.T) { t.Run(test.desc, func(t *testing.T) { t.Parallel() - result := isAccountMatchingCaServer(test.accountURI, test.serverURI) + result := isAccountMatchingCaServer(context.Background(), test.accountURI, test.serverURI) assert.Equal(t, test.expected, result) }) @@ -675,7 +676,7 @@ func TestInitAccount(t *testing.T) { acmeProvider := Provider{account: test.account, Configuration: &Configuration{Email: test.email, KeyType: test.keyType}} - actualAccount, err := acmeProvider.initAccount() + 
actualAccount, err := acmeProvider.initAccount(context.Background()) assert.Nil(t, err, "Init account in error") assert.Equal(t, test.expectedAccount.Email, actualAccount.Email, "unexpected email account") assert.Equal(t, test.expectedAccount.KeyType, actualAccount.KeyType, "unexpected keyType account") diff --git a/provider/aggregator/aggregator.go b/provider/aggregator/aggregator.go new file mode 100644 index 000000000..f03400a2d --- /dev/null +++ b/provider/aggregator/aggregator.go @@ -0,0 +1,72 @@ +package aggregator + +import ( + "encoding/json" + + "github.com/containous/traefik/config" + "github.com/containous/traefik/config/static" + "github.com/containous/traefik/log" + "github.com/containous/traefik/provider" + "github.com/containous/traefik/safe" + "github.com/containous/traefik/types" +) + +// ProviderAggregator aggregates providers. +type ProviderAggregator struct { + providers []provider.Provider + constraints types.Constraints +} + +// NewProviderAggregator returns an aggregate of all the providers configured in the static configuration. +func NewProviderAggregator(conf static.Configuration) ProviderAggregator { + p := ProviderAggregator{ + constraints: conf.Constraints, + } + + if conf.File != nil { + p.quietAddProvider(conf.File) + } + + return p +} + +func (p *ProviderAggregator) quietAddProvider(provider provider.Provider) { + err := p.AddProvider(provider) + if err != nil { + log.WithoutContext().Errorf("Error while initializing provider %T: %v", provider, err) + } +} + +// AddProvider adds a provider in the providers map. +func (p *ProviderAggregator) AddProvider(provider provider.Provider) error { + err := provider.Init(p.constraints) + if err != nil { + return err + } + p.providers = append(p.providers, provider) + return nil +} + +// Init the provider +func (p ProviderAggregator) Init(_ types.Constraints) error { + return nil +} + +// Provide calls the provide method of every providers +func (p ProviderAggregator) Provide(configurationChan chan<- config.Message, pool *safe.Pool) error { + for _, prd := range p.providers { + jsonConf, err := json.Marshal(prd) + if err != nil { + log.WithoutContext().Debugf("Cannot marshal the provider configuration %T: %v", prd, err) + } + log.WithoutContext().Infof("Starting provider %T %s", prd, jsonConf) + currentProvider := prd + safe.Go(func() { + err := currentProvider.Provide(configurationChan, pool) + if err != nil { + log.WithoutContext().Errorf("Cannot start the provider %T: %v", prd, err) + } + }) + } + return nil +} diff --git a/provider/base_provider.go b/provider/base_provider.go new file mode 100644 index 000000000..e3580ebbb --- /dev/null +++ b/provider/base_provider.go @@ -0,0 +1,141 @@ +package provider + +import ( + "bytes" + "io/ioutil" + "strings" + "text/template" + "unicode" + + "github.com/BurntSushi/toml" + "github.com/Masterminds/sprig" + "github.com/containous/traefik/autogen/gentemplates" + "github.com/containous/traefik/config" + "github.com/containous/traefik/log" + "github.com/containous/traefik/types" +) + +// BaseProvider should be inherited by providers. +type BaseProvider struct { + Watch bool `description:"Watch provider" export:"true"` + Filename string `description:"Override default configuration template. For advanced users :)" export:"true"` + Constraints types.Constraints `description:"Filter services by constraint, matching with Traefik tags." export:"true"` + Trace bool `description:"Display additional provider logs (if available)." 
export:"true"` + DebugLogGeneratedTemplate bool `description:"Enable debug logging of generated configuration template." export:"true"` +} + +// Init for compatibility reason the BaseProvider implements an empty Init. +func (p *BaseProvider) Init(constraints types.Constraints) error { + p.Constraints = append(p.Constraints, constraints...) + return nil +} + +// MatchConstraints must match with EVERY single constraint +// returns first constraint that do not match or nil. +func (p *BaseProvider) MatchConstraints(tags []string) (bool, *types.Constraint) { + // if there is no tags and no constraints, filtering is disabled + if len(tags) == 0 && len(p.Constraints) == 0 { + return true, nil + } + + for _, constraint := range p.Constraints { + // xor: if ok and constraint.MustMatch are equal, then no tag is currently matching with the constraint + if ok := constraint.MatchConstraintWithAtLeastOneTag(tags); ok != constraint.MustMatch { + return false, constraint + } + } + + // If no constraint or every constraints matching + return true, nil +} + +// GetConfiguration returns the provider configuration from default template (file or content) or overrode template file. +func (p *BaseProvider) GetConfiguration(defaultTemplate string, funcMap template.FuncMap, templateObjects interface{}) (*config.Configuration, error) { + tmplContent, err := p.getTemplateContent(defaultTemplate) + if err != nil { + return nil, err + } + return p.CreateConfiguration(tmplContent, funcMap, templateObjects) +} + +// CreateConfiguration creates a provider configuration from content using templating. +func (p *BaseProvider) CreateConfiguration(tmplContent string, funcMap template.FuncMap, templateObjects interface{}) (*config.Configuration, error) { + var defaultFuncMap = sprig.TxtFuncMap() + // tolower is deprecated in favor of sprig's lower function + defaultFuncMap["tolower"] = strings.ToLower + defaultFuncMap["normalize"] = Normalize + defaultFuncMap["split"] = split + for funcID, funcElement := range funcMap { + defaultFuncMap[funcID] = funcElement + } + + tmpl := template.New(p.Filename).Funcs(defaultFuncMap) + + _, err := tmpl.Parse(tmplContent) + if err != nil { + return nil, err + } + + var buffer bytes.Buffer + err = tmpl.Execute(&buffer, templateObjects) + if err != nil { + return nil, err + } + + var renderedTemplate = buffer.String() + if p.DebugLogGeneratedTemplate { + log.Debugf("Template content: %s", tmplContent) + log.Debugf("Rendering results: %s", renderedTemplate) + } + return p.DecodeConfiguration(renderedTemplate) +} + +// DecodeConfiguration Decodes a *types.Configuration from a content. +func (p *BaseProvider) DecodeConfiguration(content string) (*config.Configuration, error) { + configuration := new(config.Configuration) + if _, err := toml.Decode(content, configuration); err != nil { + return nil, err + } + return configuration, nil +} + +func (p *BaseProvider) getTemplateContent(defaultTemplateFile string) (string, error) { + if len(p.Filename) > 0 { + buf, err := ioutil.ReadFile(p.Filename) + if err != nil { + return "", err + } + return string(buf), nil + } + + if strings.HasSuffix(defaultTemplateFile, ".tmpl") { + buf, err := gentemplates.Asset(defaultTemplateFile) + if err != nil { + return "", err + } + return string(buf), nil + } + + return defaultTemplateFile, nil +} + +func split(sep, s string) []string { + return strings.Split(s, sep) +} + +// Normalize transforms a string that work with the rest of traefik. +// Replace '.' 
with '-' in quoted keys because of this issue https://github.com/BurntSushi/toml/issues/78 +func Normalize(name string) string { + fargs := func(c rune) bool { + return !unicode.IsLetter(c) && !unicode.IsNumber(c) + } + // get function + return strings.Join(strings.FieldsFunc(name, fargs), "-") +} + +// ReverseStringSlice inverts the order of the given slice of string. +func ReverseStringSlice(slice *[]string) { + for i, j := 0, len(*slice)-1; i < j; i, j = i+1, j-1 { + (*slice)[i], (*slice)[j] = (*slice)[j], (*slice)[i] + } +} diff --git a/provider/file/file.go b/provider/file/file.go index b54f07fcf..ef38f70c0 100644 --- a/provider/file/file.go +++ b/provider/file/file.go @@ -1,6 +1,7 @@ package file import ( + "context" "fmt" "io/ioutil" "os" @@ -9,6 +10,7 @@ import ( "strings" "text/template" + "github.com/containous/traefik/config" "github.com/containous/traefik/log" "github.com/containous/traefik/provider" "github.com/containous/traefik/safe" @@ -18,6 +20,8 @@ import ( "gopkg.in/fsnotify.v1" ) +const providerName = "file" + var _ provider.Provider = (*Provider)(nil) // Provider holds configurations of the provider. @@ -34,7 +38,7 @@ func (p *Provider) Init(constraints types.Constraints) error { // Provide allows the file provider to provide configurations to traefik // using the given configuration channel. -func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error { +func (p *Provider) Provide(configurationChan chan<- config.Message, pool *safe.Pool) error { configuration, err := p.BuildConfiguration() if err != nil { @@ -63,9 +67,11 @@ func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *s // BuildConfiguration loads configuration either from file or a directory specified by 'Filename'/'Directory' // and returns a 'Configuration' object -func (p *Provider) BuildConfiguration() (*types.Configuration, error) { +func (p *Provider) BuildConfiguration() (*config.Configuration, error) { + ctx := log.With(context.Background(), log.Str(log.ProviderName, providerName)) + if len(p.Directory) > 0 { - return p.loadFileConfigFromDirectory(p.Directory, nil) + return p.loadFileConfigFromDirectory(ctx, p.Directory, nil) } if len(p.Filename) > 0 { @@ -79,7 +85,7 @@ func (p *Provider) BuildConfiguration() (*types.Configuration, error) { return nil, errors.New("error using file configuration backend, no filename defined") } -func (p *Provider) addWatcher(pool *safe.Pool, directory string, configurationChan chan<- types.ConfigMessage, callback func(chan<- types.ConfigMessage, fsnotify.Event)) error { +func (p *Provider) addWatcher(pool *safe.Pool, directory string, configurationChan chan<- config.Message, callback func(chan<- config.Message, fsnotify.Event)) error { watcher, err := fsnotify.NewWatcher() if err != nil { return fmt.Errorf("error creating file watcher: %s", err) @@ -115,14 +121,14 @@ func (p *Provider) addWatcher(pool *safe.Pool, directory string, configurationCh callback(configurationChan, evt) } case err := <-watcher.Errors: - log.Errorf("Watcher event error: %s", err) + log.WithoutContext().WithField(log.ProviderName, providerName).Errorf("Watcher event error: %s", err) } } }) return nil } -func (p *Provider) watcherCallback(configurationChan chan<- types.ConfigMessage, event fsnotify.Event) { +func (p *Provider) watcherCallback(configurationChan chan<- config.Message, event fsnotify.Event) { watchItem := p.TraefikFile if len(p.Directory) > 0 { watchItem = p.Directory @@ -130,23 +136,24 @@ func (p *Provider) 
watcherCallback(configurationChan chan<- types.ConfigMessage, watchItem = p.Filename } + logger := log.WithoutContext().WithField(log.ProviderName, providerName) + if _, err := os.Stat(watchItem); err != nil { - log.Debugf("Unable to watch %s : %v", watchItem, err) + logger.Errorf("Unable to watch %s : %v", watchItem, err) return } configuration, err := p.BuildConfiguration() - if err != nil { - log.Errorf("Error occurred during watcher callback: %s", err) + logger.Errorf("Error occurred during watcher callback: %s", err) return } sendConfigToChannel(configurationChan, configuration) } -func sendConfigToChannel(configurationChan chan<- types.ConfigMessage, configuration *types.Configuration) { - configurationChan <- types.ConfigMessage{ +func sendConfigToChannel(configurationChan chan<- config.Message, configuration *config.Configuration) { + configurationChan <- config.Message{ ProviderName: "file", Configuration: configuration, } @@ -163,13 +170,13 @@ func readFile(filename string) (string, error) { return "", fmt.Errorf("invalid filename: %s", filename) } -func (p *Provider) loadFileConfig(filename string, parseTemplate bool) (*types.Configuration, error) { +func (p *Provider) loadFileConfig(filename string, parseTemplate bool) (*config.Configuration, error) { fileContent, err := readFile(filename) if err != nil { return nil, fmt.Errorf("error reading configuration file: %s - %s", filename, err) } - var configuration *types.Configuration + var configuration *config.Configuration if parseTemplate { configuration, err = p.CreateConfiguration(fileContent, template.FuncMap{}, false) } else { @@ -179,16 +186,20 @@ func (p *Provider) loadFileConfig(filename string, parseTemplate bool) (*types.C if err != nil { return nil, err } - if configuration == nil || configuration.Backends == nil && configuration.Frontends == nil && configuration.TLS == nil { - configuration = &types.Configuration{ - Frontends: make(map[string]*types.Frontend), - Backends: make(map[string]*types.Backend), + + if configuration == nil || configuration.Routers == nil && configuration.Middlewares == nil && configuration.Services == nil && configuration.TLS == nil { + configuration = &config.Configuration{ + Routers: make(map[string]*config.Router), + Middlewares: make(map[string]*config.Middleware), + Services: make(map[string]*config.Service), } } return configuration, err } -func (p *Provider) loadFileConfigFromDirectory(directory string, configuration *types.Configuration) (*types.Configuration, error) { +func (p *Provider) loadFileConfigFromDirectory(ctx context.Context, directory string, configuration *config.Configuration) (*config.Configuration, error) { + logger := log.FromContext(ctx) + fileList, err := ioutil.ReadDir(directory) if err != nil { @@ -196,9 +207,10 @@ func (p *Provider) loadFileConfigFromDirectory(directory string, configuration * } if configuration == nil { - configuration = &types.Configuration{ - Frontends: make(map[string]*types.Frontend), - Backends: make(map[string]*types.Backend), + configuration = &config.Configuration{ + Routers: make(map[string]*config.Router), + Middlewares: make(map[string]*config.Middleware), + Services: make(map[string]*config.Service), } } @@ -206,7 +218,7 @@ func (p *Provider) loadFileConfigFromDirectory(directory string, configuration * for _, item := range fileList { if item.IsDir() { - configuration, err = p.loadFileConfigFromDirectory(filepath.Join(directory, item.Name()), configuration) + configuration, err = p.loadFileConfigFromDirectory(ctx, 
filepath.Join(directory, item.Name()), configuration) if err != nil { return configuration, fmt.Errorf("unable to load content configuration from subdirectory %s: %v", item, err) } @@ -215,38 +227,46 @@ func (p *Provider) loadFileConfigFromDirectory(directory string, configuration * continue } - var c *types.Configuration + var c *config.Configuration c, err = p.loadFileConfig(path.Join(directory, item.Name()), true) if err != nil { return configuration, err } - for backendName, backend := range c.Backends { - if _, exists := configuration.Backends[backendName]; exists { - log.Warnf("Backend %s already configured, skipping", backendName) + for name, conf := range c.Routers { + if _, exists := configuration.Routers[name]; exists { + logger.WithField(log.RouterName, name).Warn("Router already configured, skipping") } else { - configuration.Backends[backendName] = backend + configuration.Routers[name] = conf } } - for frontendName, frontend := range c.Frontends { - if _, exists := configuration.Frontends[frontendName]; exists { - log.Warnf("Frontend %s already configured, skipping", frontendName) + for name, conf := range c.Middlewares { + if _, exists := configuration.Middlewares[name]; exists { + logger.WithField(log.MiddlewareName, name).Warn("Middleware already configured, skipping") } else { - configuration.Frontends[frontendName] = frontend + configuration.Middlewares[name] = conf + } + } + + for name, conf := range c.Services { + if _, exists := configuration.Services[name]; exists { + logger.WithField(log.ServiceName, name).Warn("Service already configured, skipping") + } else { + configuration.Services[name] = conf } } for _, conf := range c.TLS { if _, exists := configTLSMaps[conf]; exists { - log.Warnf("TLS Configuration %v already configured, skipping", conf) + logger.Warnf("TLS Configuration %v already configured, skipping", conf) } else { configTLSMaps[conf] = struct{}{} } } - } + for conf := range configTLSMaps { configuration.TLS = append(configuration.TLS, conf) } diff --git a/provider/file/file_test.go b/provider/file/file_test.go index 4e8d6a517..c38eb61ea 100644 --- a/provider/file/file_test.go +++ b/provider/file/file_test.go @@ -9,193 +9,29 @@ import ( "testing" "time" + "github.com/containous/traefik/config" "github.com/containous/traefik/safe" - "github.com/containous/traefik/types" "github.com/stretchr/testify/assert" ) -// createRandomFile Helper -func createRandomFile(t *testing.T, tempDir string, contents ...string) *os.File { - return createFile(t, tempDir, fmt.Sprintf("temp%d.toml", time.Now().UnixNano()), contents...) 
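
As a side note on the directory loading above: when several files define the same router, middleware, or service name, `loadFileConfigFromDirectory` keeps the first definition and only logs a warning for later duplicates. A minimal sketch of that merge rule, using stand-in types rather than the real `config` package:

```go
package main

import "fmt"

// dynamicConfig is a stand-in for config.Configuration, reduced to what the
// merge rule needs for the illustration.
type dynamicConfig struct {
	Routers  map[string]string // router name -> service name
	Services map[string]string // service name -> server URL
}

// mergeInto copies src into dst, keeping the first definition of each name and
// skipping later duplicates, as loadFileConfigFromDirectory does per file.
func mergeInto(dst, src dynamicConfig) {
	for name, svc := range src.Routers {
		if _, exists := dst.Routers[name]; exists {
			fmt.Printf("router %q already configured, skipping\n", name)
			continue
		}
		dst.Routers[name] = svc
	}
	for name, url := range src.Services {
		if _, exists := dst.Services[name]; exists {
			fmt.Printf("service %q already configured, skipping\n", name)
			continue
		}
		dst.Services[name] = url
	}
}

func main() {
	merged := dynamicConfig{Routers: map[string]string{}, Services: map[string]string{}}

	fileA := dynamicConfig{
		Routers:  map[string]string{"router1": "application-1"},
		Services: map[string]string{"application-1": "http://172.17.0.1:80"},
	}
	// fileB redefines router1; the first definition wins.
	fileB := dynamicConfig{Routers: map[string]string{"router1": "application-2"}}

	mergeInto(merged, fileA)
	mergeInto(merged, fileB)

	fmt.Println(merged.Routers["router1"]) // application-1
}
```
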
-} - -// createFile Helper -func createFile(t *testing.T, tempDir string, name string, contents ...string) *os.File { - t.Helper() - fileName := path.Join(tempDir, name) - - tempFile, err := os.Create(fileName) - if err != nil { - t.Fatal(err) - } - - for _, content := range contents { - _, err := tempFile.WriteString(content) - if err != nil { - t.Fatal(err) - } - } - - err = tempFile.Close() - if err != nil { - t.Fatal(err) - } - - return tempFile -} - -// createTempDir Helper -func createTempDir(t *testing.T, dir string) string { - t.Helper() - d, err := ioutil.TempDir("", dir) - if err != nil { - t.Fatal(err) - } - return d -} - -// createFrontendConfiguration Helper -func createFrontendConfiguration(n int) string { - conf := "[frontends]\n" - for i := 1; i <= n; i++ { - conf += fmt.Sprintf(` [frontends."frontend%[1]d"] - backend = "backend%[1]d" -`, i) - } - return conf -} - -// createBackendConfiguration Helper -func createBackendConfiguration(n int) string { - conf := "[backends]\n" - for i := 1; i <= n; i++ { - conf += fmt.Sprintf(` [backends.backend%[1]d] - [backends.backend%[1]d.servers.server1] - url = "http://172.17.0.%[1]d:80" -`, i) - } - return conf -} - -// createTLS Helper -func createTLS(n int) string { - var conf string - for i := 1; i <= n; i++ { - conf += fmt.Sprintf(`[[TLS]] - EntryPoints = ["https"] - [TLS.Certificate] - CertFile = "integration/fixtures/https/snitest%[1]d.com.cert" - KeyFile = "integration/fixtures/https/snitest%[1]d.com.key" -`, i) - } - return conf -} - type ProvideTestCase struct { - desc string - directoryContent []string - fileContent string - traefikFileContent string - expectedNumFrontend int - expectedNumBackend int - expectedNumTLSConf int -} - -func getTestCases() []ProvideTestCase { - return []ProvideTestCase{ - { - desc: "simple file", - fileContent: createFrontendConfiguration(2) + createBackendConfiguration(3) + createTLS(4), - expectedNumFrontend: 2, - expectedNumBackend: 3, - expectedNumTLSConf: 4, - }, - { - desc: "simple file and a traefik file", - fileContent: createFrontendConfiguration(2) + createBackendConfiguration(3) + createTLS(4), - traefikFileContent: ` - debug=true -`, - expectedNumFrontend: 2, - expectedNumBackend: 3, - expectedNumTLSConf: 4, - }, - { - desc: "template file", - fileContent: ` -[frontends] -{{ range $i, $e := until 20 }} - [frontends.frontend{{ $e }}] - backend = "backend" -{{ end }} -`, - expectedNumFrontend: 20, - }, - { - desc: "simple directory", - directoryContent: []string{ - createFrontendConfiguration(2), - createBackendConfiguration(3), - createTLS(4), - }, - expectedNumFrontend: 2, - expectedNumBackend: 3, - expectedNumTLSConf: 4, - }, - { - desc: "template in directory", - directoryContent: []string{ - ` -[frontends] -{{ range $i, $e := until 20 }} - [frontends.frontend{{ $e }}] - backend = "backend" -{{ end }} -`, - ` -[backends] -{{ range $i, $e := until 20 }} - [backends.backend{{ $e }}] - [backends.backend{{ $e }}.servers.server1] - url="http://127.0.0.1" -{{ end }} -`, - }, - expectedNumFrontend: 20, - expectedNumBackend: 20, - }, - { - desc: "simple traefik file", - traefikFileContent: ` - debug=true - [file] - ` + createFrontendConfiguration(2) + createBackendConfiguration(3) + createTLS(4), - expectedNumFrontend: 2, - expectedNumBackend: 3, - expectedNumTLSConf: 4, - }, - { - desc: "simple traefik file with templating", - traefikFileContent: ` - temp="{{ getTag \"test\" }}" - [file] - ` + createFrontendConfiguration(2) + createBackendConfiguration(3) + createTLS(4), - 
expectedNumFrontend: 2, - expectedNumBackend: 3, - expectedNumTLSConf: 4, - }, - } + desc string + directoryContent []string + fileContent string + traefikFileContent string + expectedNumRouter int + expectedNumService int + expectedNumTLSConf int } func TestProvideWithoutWatch(t *testing.T) { for _, test := range getTestCases() { - test := test t.Run(test.desc+" without watch", func(t *testing.T) { - t.Parallel() - provider, clean := createProvider(t, test, false) defer clean() - configChan := make(chan types.ConfigMessage) + configChan := make(chan config.Message) + + provider.DebugLogGeneratedTemplate = true go func() { err := provider.Provide(configChan, safe.NewPool(context.Background())) @@ -204,10 +40,10 @@ func TestProvideWithoutWatch(t *testing.T) { timeout := time.After(time.Second) select { - case config := <-configChan: - assert.Len(t, config.Configuration.Backends, test.expectedNumBackend) - assert.Len(t, config.Configuration.Frontends, test.expectedNumFrontend) - assert.Len(t, config.Configuration.TLS, test.expectedNumTLSConf) + case conf := <-configChan: + assert.Len(t, conf.Configuration.Services, test.expectedNumService) + assert.Len(t, conf.Configuration.Routers, test.expectedNumRouter) + assert.Len(t, conf.Configuration.TLS, test.expectedNumTLSConf) case <-timeout: t.Errorf("timeout while waiting for config") } @@ -217,13 +53,10 @@ func TestProvideWithoutWatch(t *testing.T) { func TestProvideWithWatch(t *testing.T) { for _, test := range getTestCases() { - test := test t.Run(test.desc+" with watch", func(t *testing.T) { - t.Parallel() - provider, clean := createProvider(t, test, true) defer clean() - configChan := make(chan types.ConfigMessage) + configChan := make(chan config.Message) go func() { err := provider.Provide(configChan, safe.NewPool(context.Background())) @@ -232,10 +65,10 @@ func TestProvideWithWatch(t *testing.T) { timeout := time.After(time.Second) select { - case config := <-configChan: - assert.Len(t, config.Configuration.Backends, 0) - assert.Len(t, config.Configuration.Frontends, 0) - assert.Len(t, config.Configuration.TLS, 0) + case conf := <-configChan: + assert.Len(t, conf.Configuration.Services, 0) + assert.Len(t, conf.Configuration.Routers, 0) + assert.Len(t, conf.Configuration.TLS, 0) case <-timeout: t.Errorf("timeout while waiting for config") } @@ -259,17 +92,17 @@ func TestProvideWithWatch(t *testing.T) { } timeout = time.After(time.Second * 1) - var numUpdates, numBackends, numFrontends, numTLSConfs int + var numUpdates, numServices, numRouters, numTLSConfs int for { select { - case config := <-configChan: + case conf := <-configChan: numUpdates++ - numBackends = len(config.Configuration.Backends) - numFrontends = len(config.Configuration.Frontends) - numTLSConfs = len(config.Configuration.TLS) - t.Logf("received update #%d: backends %d/%d, frontends %d/%d, TLS configs %d/%d", numUpdates, numBackends, test.expectedNumBackend, numFrontends, test.expectedNumFrontend, numTLSConfs, test.expectedNumTLSConf) + numServices = len(conf.Configuration.Services) + numRouters = len(conf.Configuration.Routers) + numTLSConfs = len(conf.Configuration.TLS) + t.Logf("received update #%d: services %d/%d, routers %d/%d, TLS configs %d/%d", numUpdates, numServices, test.expectedNumService, numRouters, test.expectedNumRouter, numTLSConfs, test.expectedNumTLSConf) - if numBackends == test.expectedNumBackend && numFrontends == test.expectedNumFrontend && numTLSConfs == test.expectedNumTLSConf { + if numServices == test.expectedNumService && numRouters == 
test.expectedNumRouter && numTLSConfs == test.expectedNumTLSConf { return } case <-timeout: @@ -282,7 +115,7 @@ func TestProvideWithWatch(t *testing.T) { func TestErrorWhenEmptyConfig(t *testing.T) { provider := &Provider{} - configChan := make(chan types.ConfigMessage) + configChan := make(chan config.Message) errorChan := make(chan struct{}) go func() { err := provider.Provide(configChan, safe.NewPool(context.Background())) @@ -300,6 +133,93 @@ func TestErrorWhenEmptyConfig(t *testing.T) { } } +func getTestCases() []ProvideTestCase { + return []ProvideTestCase{ + { + desc: "simple file", + fileContent: createRoutersConfiguration(3) + createServicesConfiguration(6) + createTLS(5), + expectedNumRouter: 3, + expectedNumService: 6, + expectedNumTLSConf: 5, + }, + { + desc: "simple file and a traefik file", + fileContent: createRoutersConfiguration(4) + createServicesConfiguration(8) + createTLS(4), + traefikFileContent: ` + debug=true +`, + expectedNumRouter: 4, + expectedNumService: 8, + expectedNumTLSConf: 4, + }, + { + desc: "template file", + fileContent: ` +[routers] +{{ range $i, $e := until 20 }} + [routers.router{{ $e }}] + service = "application" +{{ end }} +`, + expectedNumRouter: 20, + }, + { + desc: "simple directory", + directoryContent: []string{ + createRoutersConfiguration(2), + createServicesConfiguration(3), + createTLS(4), + }, + expectedNumRouter: 2, + expectedNumService: 3, + expectedNumTLSConf: 4, + }, + { + desc: "template in directory", + directoryContent: []string{ + ` +[routers] +{{ range $i, $e := until 20 }} + [routers.router{{ $e }}] + service = "application" +{{ end }} +`, + ` +[services] +{{ range $i, $e := until 20 }} + [services.application-{{ $e }}] + [[services.application-{{ $e }}.servers]] + url="http://127.0.0.1" + weight = 1 +{{ end }} +`, + }, + expectedNumRouter: 20, + expectedNumService: 20, + }, + { + desc: "simple traefik file", + traefikFileContent: ` + debug=true + [file] + ` + createRoutersConfiguration(2) + createServicesConfiguration(3) + createTLS(4), + expectedNumRouter: 2, + expectedNumService: 3, + expectedNumTLSConf: 4, + }, + { + desc: "simple traefik file with templating", + traefikFileContent: ` + temp="{{ getTag \"test\" }}" + [file] + ` + createRoutersConfiguration(2) + createServicesConfiguration(3) + createTLS(4), + expectedNumRouter: 2, + expectedNumService: 3, + expectedNumTLSConf: 4, + }, + } +} + func createProvider(t *testing.T, test ProvideTestCase, watch bool) (*Provider, func()) { tempDir := createTempDir(t, "testdir") @@ -336,3 +256,83 @@ func createProvider(t *testing.T, test ProvideTestCase, watch bool) (*Provider, os.Remove(tempDir) } } + +// createRandomFile Helper +func createRandomFile(t *testing.T, tempDir string, contents ...string) *os.File { + return createFile(t, tempDir, fmt.Sprintf("temp%d.toml", time.Now().UnixNano()), contents...) 
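
The "template file" and "template in directory" cases above go through the provider's template pipeline: the file content is parsed with `text/template` extended by the sprig function map (which supplies `until`), rendered, and only then decoded as TOML. A minimal sketch of that rendering step, using only the public template and sprig APIs:

```go
package main

import (
	"os"
	"text/template"

	"github.com/Masterminds/sprig"
)

// tmplContent mirrors the shape of the "template file" test case: a range over
// sprig's `until` that expands into several router tables.
const tmplContent = `
[routers]
{{ range $i, $e := until 3 }}
  [routers.router{{ $e }}]
    service = "application"
{{ end }}
`

func main() {
	tmpl, err := template.New("file").Funcs(sprig.TxtFuncMap()).Parse(tmplContent)
	if err != nil {
		panic(err)
	}

	// CreateConfiguration would decode the rendered string with toml.Decode;
	// here it is just printed to show the expanded TOML.
	if err := tmpl.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
}
```
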
+} + +// createFile Helper +func createFile(t *testing.T, tempDir string, name string, contents ...string) *os.File { + t.Helper() + fileName := path.Join(tempDir, name) + + tempFile, err := os.Create(fileName) + if err != nil { + t.Fatal(err) + } + + for _, content := range contents { + _, err = tempFile.WriteString(content) + if err != nil { + t.Fatal(err) + } + } + + err = tempFile.Close() + if err != nil { + t.Fatal(err) + } + + return tempFile +} + +// createTempDir Helper +func createTempDir(t *testing.T, dir string) string { + t.Helper() + d, err := ioutil.TempDir("", dir) + if err != nil { + t.Fatal(err) + } + return d +} + +// createRoutersConfiguration Helper +func createRoutersConfiguration(n int) string { + conf := "[routers]\n" + for i := 1; i <= n; i++ { + conf += fmt.Sprintf(` +[routers."router%[1]d"] + service = "application-%[1]d" +`, i) + } + return conf +} + +// createServicesConfiguration Helper +func createServicesConfiguration(n int) string { + conf := "[services]\n" + for i := 1; i <= n; i++ { + conf += fmt.Sprintf(` +[services.application-%[1]d.loadbalancer] + [[services.application-%[1]d.loadbalancer.servers]] + url = "http://172.17.0.%[1]d:80" + weight = 1 +`, i) + } + return conf +} + +// createTLS Helper +func createTLS(n int) string { + var conf string + for i := 1; i <= n; i++ { + conf += fmt.Sprintf(`[[TLS]] + EntryPoints = ["https"] + [TLS.Certificate] + CertFile = "integration/fixtures/https/snitest%[1]d.com.cert" + KeyFile = "integration/fixtures/https/snitest%[1]d.com.key" +`, i) + } + return conf +} diff --git a/provider/provider.go b/provider/provider.go index 612cd530f..1708e1a48 100644 --- a/provider/provider.go +++ b/provider/provider.go @@ -1,16 +1,7 @@ package provider import ( - "bytes" - "io/ioutil" - "strings" - "text/template" - "unicode" - - "github.com/BurntSushi/toml" - "github.com/Masterminds/sprig" - "github.com/containous/traefik/autogen/gentemplates" - "github.com/containous/traefik/log" + "github.com/containous/traefik/config" "github.com/containous/traefik/safe" "github.com/containous/traefik/types" ) @@ -19,131 +10,6 @@ import ( type Provider interface { // Provide allows the provider to provide configurations to traefik // using the given configuration channel. - Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error + Provide(configurationChan chan<- config.Message, pool *safe.Pool) error Init(constraints types.Constraints) error } - -// BaseProvider should be inherited by providers -type BaseProvider struct { - Watch bool `description:"Watch provider" export:"true"` - Filename string `description:"Override default configuration template. For advanced users :)" export:"true"` - Constraints types.Constraints `description:"Filter services by constraint, matching with Traefik tags." export:"true"` - Trace bool `description:"Display additional provider logs (if available)." export:"true"` - DebugLogGeneratedTemplate bool `description:"Enable debug logging of generated configuration template." export:"true"` -} - -// Init for compatibility reason the BaseProvider implements an empty Init -func (p *BaseProvider) Init(constraints types.Constraints) error { - p.Constraints = append(p.Constraints, constraints...) 
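
For reference, the TOML emitted by `createRoutersConfiguration` and `createServicesConfiguration` above follows the new routers/services layout. A small sketch of how such content decodes, assuming simplified stand-in structs (the real types live in the `config` package) and the BurntSushi/toml decoder already used by the providers:

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

type server struct {
	URL    string `toml:"url"`
	Weight int    `toml:"weight"`
}

type loadBalancer struct {
	Servers []server `toml:"servers"`
}

type service struct {
	LoadBalancer loadBalancer `toml:"loadbalancer"`
}

type router struct {
	Service string `toml:"service"`
}

type configuration struct {
	Routers  map[string]router  `toml:"routers"`
	Services map[string]service `toml:"services"`
}

// content has the same shape as the test helpers' output.
const content = `
[routers]
  [routers."router1"]
    service = "application-1"

[services]
  [services.application-1.loadbalancer]
    [[services.application-1.loadbalancer.servers]]
      url = "http://172.17.0.1:80"
      weight = 1
`

func main() {
	var conf configuration
	if _, err := toml.Decode(content, &conf); err != nil {
		panic(err)
	}

	fmt.Println(conf.Routers["router1"].Service)                            // application-1
	fmt.Println(conf.Services["application-1"].LoadBalancer.Servers[0].URL) // http://172.17.0.1:80
}
```
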
- return nil -} - -// MatchConstraints must match with EVERY single constraint -// returns first constraint that do not match or nil -func (p *BaseProvider) MatchConstraints(tags []string) (bool, *types.Constraint) { - // if there is no tags and no constraints, filtering is disabled - if len(tags) == 0 && len(p.Constraints) == 0 { - return true, nil - } - - for _, constraint := range p.Constraints { - // xor: if ok and constraint.MustMatch are equal, then no tag is currently matching with the constraint - if ok := constraint.MatchConstraintWithAtLeastOneTag(tags); ok != constraint.MustMatch { - return false, constraint - } - } - - // If no constraint or every constraints matching - return true, nil -} - -// GetConfiguration return the provider configuration from default template (file or content) or overrode template file -func (p *BaseProvider) GetConfiguration(defaultTemplate string, funcMap template.FuncMap, templateObjects interface{}) (*types.Configuration, error) { - tmplContent, err := p.getTemplateContent(defaultTemplate) - if err != nil { - return nil, err - } - return p.CreateConfiguration(tmplContent, funcMap, templateObjects) -} - -// CreateConfiguration create a provider configuration from content using templating -func (p *BaseProvider) CreateConfiguration(tmplContent string, funcMap template.FuncMap, templateObjects interface{}) (*types.Configuration, error) { - var defaultFuncMap = sprig.TxtFuncMap() - // tolower is deprecated in favor of sprig's lower function - defaultFuncMap["tolower"] = strings.ToLower - defaultFuncMap["normalize"] = Normalize - defaultFuncMap["split"] = split - for funcID, funcElement := range funcMap { - defaultFuncMap[funcID] = funcElement - } - - tmpl := template.New(p.Filename).Funcs(defaultFuncMap) - - _, err := tmpl.Parse(tmplContent) - if err != nil { - return nil, err - } - - var buffer bytes.Buffer - err = tmpl.Execute(&buffer, templateObjects) - if err != nil { - return nil, err - } - - var renderedTemplate = buffer.String() - if p.DebugLogGeneratedTemplate { - log.Debugf("Template content: %s", tmplContent) - log.Debugf("Rendering results: %s", renderedTemplate) - } - return p.DecodeConfiguration(renderedTemplate) -} - -// DecodeConfiguration Decode a *types.Configuration from a content -func (p *BaseProvider) DecodeConfiguration(content string) (*types.Configuration, error) { - configuration := new(types.Configuration) - if _, err := toml.Decode(content, configuration); err != nil { - return nil, err - } - return configuration, nil -} - -func (p *BaseProvider) getTemplateContent(defaultTemplateFile string) (string, error) { - if len(p.Filename) > 0 { - buf, err := ioutil.ReadFile(p.Filename) - if err != nil { - return "", err - } - return string(buf), nil - } - - if strings.HasSuffix(defaultTemplateFile, ".tmpl") { - buf, err := gentemplates.Asset(defaultTemplateFile) - if err != nil { - return "", err - } - return string(buf), nil - } - - return defaultTemplateFile, nil -} - -func split(sep, s string) []string { - return strings.Split(s, sep) -} - -// Normalize transform a string that work with the rest of traefik -// Replace '.' 
with '-' in quoted keys because of this issue https://github.com/BurntSushi/toml/issues/78 -func Normalize(name string) string { - fargs := func(c rune) bool { - return !unicode.IsLetter(c) && !unicode.IsNumber(c) - } - // get function - return strings.Join(strings.FieldsFunc(name, fargs), "-") -} - -// ReverseStringSlice invert the order of the given slice of string -func ReverseStringSlice(slice *[]string) { - for i, j := 0, len(*slice)-1; i < j; i, j = i+1, j-1 { - (*slice)[i], (*slice)[j] = (*slice)[j], (*slice)[i] - } -} diff --git a/responsemodifiers/headers.go b/responsemodifiers/headers.go new file mode 100644 index 000000000..bd492472f --- /dev/null +++ b/responsemodifiers/headers.go @@ -0,0 +1,55 @@ +package responsemodifiers + +import ( + "net/http" + + "github.com/containous/traefik/config" + "github.com/unrolled/secure" +) + +func buildHeaders(headers *config.Headers) func(*http.Response) error { + opt := secure.Options{ + BrowserXssFilter: headers.BrowserXSSFilter, + ContentTypeNosniff: headers.ContentTypeNosniff, + ForceSTSHeader: headers.ForceSTSHeader, + FrameDeny: headers.FrameDeny, + IsDevelopment: headers.IsDevelopment, + SSLRedirect: headers.SSLRedirect, + SSLForceHost: headers.SSLForceHost, + SSLTemporaryRedirect: headers.SSLTemporaryRedirect, + STSIncludeSubdomains: headers.STSIncludeSubdomains, + STSPreload: headers.STSPreload, + ContentSecurityPolicy: headers.ContentSecurityPolicy, + CustomBrowserXssValue: headers.CustomBrowserXSSValue, + CustomFrameOptionsValue: headers.CustomFrameOptionsValue, + PublicKey: headers.PublicKey, + ReferrerPolicy: headers.ReferrerPolicy, + SSLHost: headers.SSLHost, + AllowedHosts: headers.AllowedHosts, + HostsProxyHeaders: headers.HostsProxyHeaders, + SSLProxyHeaders: headers.SSLProxyHeaders, + STSSeconds: headers.STSSeconds, + } + + return func(resp *http.Response) error { + if headers.HasCustomHeadersDefined() { + // Loop through Custom response headers + for header, value := range headers.CustomResponseHeaders { + if value == "" { + resp.Header.Del(header) + } else { + resp.Header.Set(header, value) + } + } + } + + if headers.HasSecureHeadersDefined() { + err := secure.New(opt).ModifyResponseHeaders(resp) + if err != nil { + return err + } + } + + return nil + } +} diff --git a/responsemodifiers/log.go b/responsemodifiers/log.go new file mode 100644 index 000000000..83be5dc16 --- /dev/null +++ b/responsemodifiers/log.go @@ -0,0 +1,13 @@ +package responsemodifiers + +import ( + "context" + + "github.com/containous/traefik/log" + "github.com/sirupsen/logrus" +) + +// getLogger creates a logger configured with the middleware fields. +func getLogger(ctx context.Context, middleware string, middlewareType string) logrus.FieldLogger { + return log.FromContext(ctx).WithField(log.MiddlewareName, middleware).WithField(log.MiddlewareType, middlewareType) +} diff --git a/responsemodifiers/response_modifier.go b/responsemodifiers/response_modifier.go new file mode 100644 index 000000000..3db627909 --- /dev/null +++ b/responsemodifiers/response_modifier.go @@ -0,0 +1,51 @@ +package responsemodifiers + +import ( + "context" + "net/http" + + "github.com/containous/traefik/config" +) + +// NewBuilder creates a builder. +func NewBuilder(configs map[string]*config.Middleware) *Builder { + return &Builder{configs: configs} +} + +// Builder holds builder configuration. +type Builder struct { + configs map[string]*config.Middleware +} + +// Build Builds the response modifier. 
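
Build collects one `func(*http.Response) error` per matching middleware and applies them in reverse declaration order, so the first middleware listed has the final say on a header (this is what the "modifier order" test below asserts). A standalone sketch of that composition rule:

```go
package main

import (
	"fmt"
	"net/http"
)

// setHeader returns a response modifier, the same shape Build collects.
func setHeader(key, value string) func(*http.Response) error {
	return func(resp *http.Response) error {
		resp.Header.Set(key, value)
		return nil
	}
}

func main() {
	modifiers := []func(*http.Response) error{
		setHeader("X-Foo", "foo"), // first in the chain
		setHeader("X-Foo", "bar"), // second in the chain
	}

	// Same loop as Build: walk the slice backwards so the first modifier runs last.
	combined := func(resp *http.Response) error {
		for i := len(modifiers); i > 0; i-- {
			if err := modifiers[i-1](resp); err != nil {
				return err
			}
		}
		return nil
	}

	resp := &http.Response{Header: make(http.Header)}
	if err := combined(resp); err != nil {
		panic(err)
	}

	fmt.Println(resp.Header.Get("X-Foo")) // foo
}
```
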
+func (f *Builder) Build(ctx context.Context, names []string) func(*http.Response) error { + var modifiers []func(*http.Response) error + + for _, middleName := range names { + if conf, ok := f.configs[middleName]; ok { + if conf.Headers != nil { + getLogger(ctx, middleName, "Headers").Debug("Creating Middleware (ResponseModifier)") + + modifiers = append(modifiers, buildHeaders(conf.Headers)) + } else if conf.Chain != nil { + getLogger(ctx, middleName, "Chain").Debug("Creating Middleware (ResponseModifier)") + + modifiers = append(modifiers, f.Build(ctx, conf.Chain.Middlewares)) + } + } + } + + if len(modifiers) > 0 { + return func(resp *http.Response) error { + for i := len(modifiers); i > 0; i-- { + err := modifiers[i-1](resp) + if err != nil { + return err + } + } + return nil + } + } + + return func(response *http.Response) error { return nil } +} diff --git a/responsemodifiers/response_modifier_test.go b/responsemodifiers/response_modifier_test.go new file mode 100644 index 000000000..952f33486 --- /dev/null +++ b/responsemodifiers/response_modifier_test.go @@ -0,0 +1,181 @@ +package responsemodifiers + +import ( + "context" + "net/http" + "net/http/httptest" + "testing" + + "github.com/containous/traefik/config" + "github.com/containous/traefik/middlewares/headers" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func stubResponse(_ map[string]*config.Middleware) *http.Response { + return &http.Response{Header: make(http.Header)} +} + +func TestBuilderBuild(t *testing.T) { + testCases := []struct { + desc string + middlewares []string + // buildResponse is needed because secure use a private context key + buildResponse func(map[string]*config.Middleware) *http.Response + conf map[string]*config.Middleware + assertResponse func(*testing.T, *http.Response) + }{ + { + desc: "no configuration", + middlewares: []string{"foo", "bar"}, + buildResponse: stubResponse, + conf: map[string]*config.Middleware{}, + assertResponse: func(t *testing.T, resp *http.Response) {}, + }, + { + desc: "one modifier", + middlewares: []string{"foo", "bar"}, + buildResponse: stubResponse, + conf: map[string]*config.Middleware{ + "foo": { + Headers: &config.Headers{ + CustomResponseHeaders: map[string]string{"X-Foo": "foo"}, + }, + }, + }, + assertResponse: func(t *testing.T, resp *http.Response) { + t.Helper() + + assert.Equal(t, resp.Header.Get("X-Foo"), "foo") + }, + }, + { + desc: "secure: one modifier", + middlewares: []string{"foo", "bar"}, + buildResponse: func(middlewares map[string]*config.Middleware) *http.Response { + ctx := context.Background() + + var request *http.Request + next := http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + request = req + }) + + headerM := *middlewares["foo"].Headers + handler, err := headers.New(ctx, next, headerM, "secure") + require.NoError(t, err) + + handler.ServeHTTP(httptest.NewRecorder(), + httptest.NewRequest(http.MethodGet, "http://foo.com", nil)) + + return &http.Response{Header: make(http.Header), Request: request} + }, + conf: map[string]*config.Middleware{ + "foo": { + Headers: &config.Headers{ + ReferrerPolicy: "no-referrer", + }, + }, + "bar": { + Headers: &config.Headers{ + CustomResponseHeaders: map[string]string{"X-Bar": "bar"}, + }, + }, + }, + assertResponse: func(t *testing.T, resp *http.Response) { + t.Helper() + + assert.Equal(t, resp.Header.Get("Referrer-Policy"), "no-referrer") + }, + }, + { + desc: "two modifiers", + middlewares: []string{"foo", "bar"}, + buildResponse: stubResponse, + conf: 
map[string]*config.Middleware{ + "foo": { + Headers: &config.Headers{ + CustomResponseHeaders: map[string]string{"X-Foo": "foo"}, + }, + }, + "bar": { + Headers: &config.Headers{ + CustomResponseHeaders: map[string]string{"X-Bar": "bar"}, + }, + }, + }, + assertResponse: func(t *testing.T, resp *http.Response) { + t.Helper() + + assert.Equal(t, resp.Header.Get("X-Foo"), "foo") + assert.Equal(t, resp.Header.Get("X-Bar"), "bar") + }, + }, + { + desc: "modifier order", + middlewares: []string{"foo", "bar"}, + buildResponse: stubResponse, + conf: map[string]*config.Middleware{ + "foo": { + Headers: &config.Headers{ + CustomResponseHeaders: map[string]string{"X-Foo": "foo"}, + }, + }, + "bar": { + Headers: &config.Headers{ + CustomResponseHeaders: map[string]string{"X-Foo": "bar"}, + }, + }, + }, + assertResponse: func(t *testing.T, resp *http.Response) { + t.Helper() + + assert.Equal(t, resp.Header.Get("X-Foo"), "foo") + }, + }, + { + desc: "chain", + middlewares: []string{"chain"}, + buildResponse: stubResponse, + conf: map[string]*config.Middleware{ + "foo": { + Headers: &config.Headers{ + CustomResponseHeaders: map[string]string{"X-Foo": "foo"}, + }, + }, + "bar": { + Headers: &config.Headers{ + CustomResponseHeaders: map[string]string{"X-Foo": "bar"}, + }, + }, + "chain": { + Chain: &config.Chain{ + Middlewares: []string{"foo", "bar"}, + }, + }, + }, + assertResponse: func(t *testing.T, resp *http.Response) { + t.Helper() + + assert.Equal(t, resp.Header.Get("X-Foo"), "foo") + }, + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + builder := NewBuilder(test.conf) + + rm := builder.Build(context.Background(), test.middlewares) + + resp := test.buildResponse(test.conf) + + err := rm(resp) + require.NoError(t, err) + + test.assertResponse(t, resp) + }) + } +} diff --git a/rules/rules.go b/rules/rules.go index b523a882f..2f0043633 100644 --- a/rules/rules.go +++ b/rules/rules.go @@ -10,9 +10,8 @@ import ( "github.com/containous/mux" "github.com/containous/traefik/hostresolver" - "github.com/containous/traefik/log" - "github.com/containous/traefik/middlewares" - "github.com/containous/traefik/types" + "github.com/containous/traefik/old/middlewares" + "github.com/containous/traefik/old/types" ) // Rules holds rule parsing and configuration @@ -39,7 +38,8 @@ func (r *Rules) host(hosts ...string) *mux.Route { if strings.EqualFold(reqH, host) || strings.EqualFold(flatH, host) { return true } - log.Debugf("CNAMEFlattening: request %s which resolved to %s, is not matched to route %s", reqH, flatH, host) + // FIXME + //log.Debugf("CNAMEFlattening: request %s which resolved to %s, is not matched to route %s", reqH, flatH, host) } return false } diff --git a/rules/rules_test.go b/rules/rules_test.go index a843e63ee..222aed363 100644 --- a/rules/rules_test.go +++ b/rules/rules_test.go @@ -6,9 +6,9 @@ import ( "testing" "github.com/containous/mux" - "github.com/containous/traefik/middlewares" + "github.com/containous/traefik/old/middlewares" + "github.com/containous/traefik/old/types" "github.com/containous/traefik/testhelpers" - "github.com/containous/traefik/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -117,7 +117,7 @@ func TestParseDomains(t *testing.T) { } } -func TestPriorites(t *testing.T) { +func TestPriorities(t *testing.T) { router := mux.NewRouter() router.StrictSlash(true) @@ -253,7 +253,6 @@ func TestHostRegexp(t *testing.T) { func TestParseInvalidSyntax(t *testing.T) { router := 
mux.NewRouter() - router.StrictSlash(true) rules := &Rules{Route: &types.ServerRoute{Route: router.NewRoute()}} expression01 := "Path: /path1;Query:param_one=true, /path2" diff --git a/server/middleware/middlewares.go b/server/middleware/middlewares.go new file mode 100644 index 000000000..b1b4d9f79 --- /dev/null +++ b/server/middleware/middlewares.go @@ -0,0 +1,292 @@ +package middleware + +import ( + "context" + "fmt" + "net/http" + + "github.com/containous/alice" + "github.com/containous/traefik/config" + "github.com/containous/traefik/middlewares/addprefix" + "github.com/containous/traefik/middlewares/auth" + "github.com/containous/traefik/middlewares/buffering" + "github.com/containous/traefik/middlewares/chain" + "github.com/containous/traefik/middlewares/circuitbreaker" + "github.com/containous/traefik/middlewares/compress" + "github.com/containous/traefik/middlewares/customerrors" + "github.com/containous/traefik/middlewares/headers" + "github.com/containous/traefik/middlewares/ipwhitelist" + "github.com/containous/traefik/middlewares/maxconnection" + "github.com/containous/traefik/middlewares/passtlsclientcert" + "github.com/containous/traefik/middlewares/ratelimiter" + "github.com/containous/traefik/middlewares/redirect" + "github.com/containous/traefik/middlewares/replacepath" + "github.com/containous/traefik/middlewares/replacepathregex" + "github.com/containous/traefik/middlewares/retry" + "github.com/containous/traefik/middlewares/stripprefix" + "github.com/containous/traefik/middlewares/stripprefixregex" + "github.com/containous/traefik/middlewares/tracing" + "github.com/pkg/errors" +) + +// Builder the middleware builder +type Builder struct { + configs map[string]*config.Middleware + serviceBuilder serviceBuilder +} + +type serviceBuilder interface { + Build(ctx context.Context, serviceName string, responseModifier func(*http.Response) error) (http.Handler, error) +} + +// NewBuilder creates a new Builder +func NewBuilder(configs map[string]*config.Middleware, serviceBuilder serviceBuilder) *Builder { + return &Builder{configs: configs, serviceBuilder: serviceBuilder} +} + +// BuildChain creates a middleware chain +func (b *Builder) BuildChain(ctx context.Context, middlewares []string) (*alice.Chain, error) { + chain := alice.New() + for _, middlewareName := range middlewares { + if _, ok := b.configs[middlewareName]; !ok { + return nil, fmt.Errorf("middleware %q does not exist", middlewareName) + } + + constructor, err := b.buildConstructor(ctx, middlewareName, *b.configs[middlewareName]) + if err != nil { + return nil, err + } + if constructor != nil { + chain = chain.Append(constructor) + } + } + return &chain, nil +} + +func (b *Builder) buildConstructor(ctx context.Context, middlewareName string, config config.Middleware) (alice.Constructor, error) { + var middleware alice.Constructor + badConf := errors.New("cannot create middleware %q: multi-types middleware not supported, consider declaring two different pieces of middleware instead") + + // AddPrefix + if config.AddPrefix != nil { + if middleware == nil { + middleware = func(next http.Handler) (http.Handler, error) { + return addprefix.New(ctx, next, *config.AddPrefix, middlewareName) + } + } else { + return nil, badConf + } + } + + // BasicAuth + if config.BasicAuth != nil { + if middleware == nil { + middleware = func(next http.Handler) (http.Handler, error) { + return auth.NewBasic(ctx, next, *config.BasicAuth, middlewareName) + } + } else { + return nil, badConf + } + } + + // Buffering + if config.Buffering 
!= nil && config.MaxConn.Amount != 0 { + if middleware == nil { + middleware = func(next http.Handler) (http.Handler, error) { + return buffering.New(ctx, next, *config.Buffering, middlewareName) + } + } else { + return nil, badConf + } + } + + // Chain + if config.Chain != nil { + if middleware == nil { + middleware = func(next http.Handler) (http.Handler, error) { + return chain.New(ctx, next, *config.Chain, b, middlewareName) + } + } else { + return nil, badConf + } + } + + // CircuitBreaker + if config.CircuitBreaker != nil { + if middleware == nil { + middleware = func(next http.Handler) (http.Handler, error) { + return circuitbreaker.New(ctx, next, *config.CircuitBreaker, middlewareName) + } + } else { + return nil, badConf + } + } + + // Compress + if config.Compress != nil { + if middleware == nil { + middleware = func(next http.Handler) (http.Handler, error) { + return compress.New(ctx, next, middlewareName) + } + } else { + return nil, badConf + } + } + + // CustomErrors + if config.Errors != nil { + if middleware == nil { + middleware = func(next http.Handler) (http.Handler, error) { + return customerrors.New(ctx, next, *config.Errors, b.serviceBuilder, middlewareName) + } + } else { + return nil, badConf + } + } + + // DigestAuth + if config.DigestAuth != nil { + if middleware == nil { + middleware = func(next http.Handler) (http.Handler, error) { + return auth.NewDigest(ctx, next, *config.DigestAuth, middlewareName) + } + } else { + return nil, badConf + } + } + + // ForwardAuth + if config.ForwardAuth != nil { + if middleware == nil { + middleware = func(next http.Handler) (http.Handler, error) { + return auth.NewForward(ctx, next, *config.ForwardAuth, middlewareName) + } + } else { + return nil, badConf + } + } + + // Headers + if config.Headers != nil { + if middleware == nil { + middleware = func(next http.Handler) (http.Handler, error) { + return headers.New(ctx, next, *config.Headers, middlewareName) + } + } else { + return nil, badConf + } + } + + // IPWhiteList + if config.IPWhiteList != nil { + if middleware == nil { + middleware = func(next http.Handler) (http.Handler, error) { + return ipwhitelist.New(ctx, next, *config.IPWhiteList, middlewareName) + } + } else { + return nil, badConf + } + } + + // MaxConn + if config.MaxConn != nil && config.MaxConn.Amount != 0 { + if middleware == nil { + middleware = func(next http.Handler) (http.Handler, error) { + return maxconnection.New(ctx, next, *config.MaxConn, middlewareName) + } + } else { + return nil, badConf + } + } + + // PassTLSClientCert + if config.PassTLSClientCert != nil { + if middleware == nil { + middleware = func(next http.Handler) (http.Handler, error) { + return passtlsclientcert.New(ctx, next, *config.PassTLSClientCert, middlewareName) + } + } else { + return nil, badConf + } + } + + // RateLimit + if config.RateLimit != nil { + if middleware == nil { + middleware = func(next http.Handler) (http.Handler, error) { + return ratelimiter.New(ctx, next, *config.RateLimit, middlewareName) + } + } else { + return nil, badConf + } + } + + // Redirect + if config.Redirect != nil { + if middleware == nil { + middleware = func(next http.Handler) (http.Handler, error) { + return redirect.New(ctx, next, *config.Redirect, middlewareName) + } + } else { + return nil, badConf + } + } + + // ReplacePath + if config.ReplacePath != nil { + if middleware == nil { + middleware = func(next http.Handler) (http.Handler, error) { + return replacepath.New(ctx, next, *config.ReplacePath, middlewareName) + } + } else { + return 
nil, badConf + } + } + + // ReplacePathRegex + if config.ReplacePathRegex != nil { + if middleware == nil { + middleware = func(next http.Handler) (http.Handler, error) { + return replacepathregex.New(ctx, next, *config.ReplacePathRegex, middlewareName) + } + } else { + return nil, badConf + } + } + + // Retry + if config.Retry != nil { + if middleware == nil { + middleware = func(next http.Handler) (http.Handler, error) { + // FIXME missing metrics / accessLog + return retry.New(ctx, next, *config.Retry, retry.Listeners{}, middlewareName) + } + } else { + return nil, badConf + } + } + + // StripPrefix + if config.StripPrefix != nil { + if middleware == nil { + middleware = func(next http.Handler) (http.Handler, error) { + return stripprefix.New(ctx, next, *config.StripPrefix, middlewareName) + } + } else { + return nil, badConf + } + } + + // StripPrefixRegex + if config.StripPrefixRegex != nil { + if middleware == nil { + middleware = func(next http.Handler) (http.Handler, error) { + return stripprefixregex.New(ctx, next, *config.StripPrefixRegex, middlewareName) + } + } else { + return nil, badConf + } + } + + return tracing.Wrap(ctx, middleware), nil +} diff --git a/server/middleware/middlewares_test.go b/server/middleware/middlewares_test.go new file mode 100644 index 000000000..76a66d6c5 --- /dev/null +++ b/server/middleware/middlewares_test.go @@ -0,0 +1,127 @@ +package middleware + +import ( + "context" + "net/http" + "testing" + + "github.com/containous/traefik/config" + "github.com/stretchr/testify/require" +) + +func TestMiddlewaresRegistry_BuildMiddlewareCircuitBreaker(t *testing.T) { + testConfig := map[string]*config.Middleware{ + "empty": { + CircuitBreaker: &config.CircuitBreaker{ + Expression: "", + }, + }, + "foo": { + CircuitBreaker: &config.CircuitBreaker{ + Expression: "NetworkErrorRatio() > 0.5", + }, + }, + } + middlewaresBuilder := NewBuilder(testConfig, nil) + + emptyHandler := http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {}) + + testCases := []struct { + desc string + middlewareID string + expectedError bool + }{ + { + desc: "Should fail at creating a circuit breaker with an empty expression", + expectedError: true, + middlewareID: "empty", + }, { + desc: "Should create a circuit breaker with a valid expression", + expectedError: false, + middlewareID: "foo", + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + constructor, err := middlewaresBuilder.buildConstructor(context.Background(), test.middlewareID, *testConfig[test.middlewareID]) + require.NoError(t, err) + + middleware, err2 := constructor(emptyHandler) + + if test.expectedError { + require.Error(t, err2) + } else { + require.NoError(t, err) + require.NotNil(t, middleware) + } + }) + } +} + +func TestMiddlewaresRegistry_BuildChainNilConfig(t *testing.T) { + testConfig := map[string]*config.Middleware{ + "empty": {}, + } + middlewaresBuilder := NewBuilder(testConfig, nil) + + chain, err := middlewaresBuilder.BuildChain(context.Background(), []string{"empty"}) + require.NoError(t, err) + + _, err = chain.Then(nil) + require.NoError(t, err) +} + +func TestMiddlewaresRegistry_BuildMiddlewareAddPrefix(t *testing.T) { + testConfig := map[string]*config.Middleware{ + "empty": { + AddPrefix: &config.AddPrefix{ + Prefix: "", + }, + }, + "foo": { + AddPrefix: &config.AddPrefix{ + Prefix: "foo/", + }, + }, + } + + middlewaresBuilder := NewBuilder(testConfig, nil) + + testCases := []struct { + desc string + middlewareID string + 
expectedError bool + }{ + { + desc: "Should not create an emty AddPrefix middleware when given an empty prefix", + middlewareID: "empty", + expectedError: true, + }, { + desc: "Should create an AddPrefix middleware when given a valid configuration", + middlewareID: "foo", + expectedError: false, + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + constructor, err := middlewaresBuilder.buildConstructor(context.Background(), test.middlewareID, *testConfig[test.middlewareID]) + require.NoError(t, err) + + middleware, err2 := constructor(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {})) + + if test.expectedError { + require.Error(t, err2) + } else { + require.NoError(t, err) + require.NotNil(t, middleware) + } + }) + } +} diff --git a/server/roundtripper.go b/server/roundtripper.go new file mode 100644 index 000000000..195d8d713 --- /dev/null +++ b/server/roundtripper.go @@ -0,0 +1,94 @@ +package server + +import ( + "crypto/tls" + "crypto/x509" + "net" + "net/http" + "time" + + "github.com/containous/traefik/log" + "github.com/containous/traefik/old/configuration" + traefiktls "github.com/containous/traefik/tls" + "golang.org/x/net/http2" +) + +type h2cTransportWrapper struct { + *http2.Transport +} + +func (t *h2cTransportWrapper) RoundTrip(req *http.Request) (*http.Response, error) { + req.URL.Scheme = "http" + return t.Transport.RoundTrip(req) +} + +// createHTTPTransport creates an http.Transport configured with the GlobalConfiguration settings. +// For the settings that can't be configured in Traefik it uses the default http.Transport settings. +// An exception to this is the MaxIdleConns setting as we only provide the option MaxIdleConnsPerHost +// in Traefik at this point in time. Setting this value to the default of 100 could lead to confusing +// behavior and backwards compatibility issues. 
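
As context for where this transport ends up: the proxying layer hands it to an `httputil.ReverseProxy` (or an equivalent forwarder) so the dial, TLS, and h2c settings apply to every backend request. A rough usage sketch with made-up addresses; the transport built inline is only a stand-in for the one returned by createHTTPTransport:

```go
package main

import (
	"log"
	"net/http"
	"net/http/httputil"
	"net/url"
)

func main() {
	// Stand-in for the *http.Transport returned by createHTTPTransport; in the
	// real code a backend URL using the "h2c" scheme would be handled by the
	// round tripper registered with RegisterProtocol.
	transport := &http.Transport{
		Proxy:               http.ProxyFromEnvironment,
		MaxIdleConnsPerHost: 2, // would come from GlobalConfiguration.MaxIdleConnsPerHost
	}

	backend, err := url.Parse("http://127.0.0.1:8080") // made-up backend
	if err != nil {
		log.Fatal(err)
	}

	proxy := httputil.NewSingleHostReverseProxy(backend)
	proxy.Transport = transport

	// Every request hitting :9000 is forwarded through the shared transport.
	log.Fatal(http.ListenAndServe(":9000", proxy))
}
```
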
+func createHTTPTransport(globalConfiguration configuration.GlobalConfiguration) (*http.Transport, error) { + dialer := &net.Dialer{ + Timeout: configuration.DefaultDialTimeout, + KeepAlive: 30 * time.Second, + DualStack: true, + } + + if globalConfiguration.ForwardingTimeouts != nil { + dialer.Timeout = time.Duration(globalConfiguration.ForwardingTimeouts.DialTimeout) + } + + transport := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: dialer.DialContext, + MaxIdleConnsPerHost: globalConfiguration.MaxIdleConnsPerHost, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + } + + transport.RegisterProtocol("h2c", &h2cTransportWrapper{ + Transport: &http2.Transport{ + DialTLS: func(netw, addr string, cfg *tls.Config) (net.Conn, error) { + return net.Dial(netw, addr) + }, + AllowHTTP: true, + }, + }) + + if globalConfiguration.ForwardingTimeouts != nil { + transport.ResponseHeaderTimeout = time.Duration(globalConfiguration.ForwardingTimeouts.ResponseHeaderTimeout) + } + + if globalConfiguration.InsecureSkipVerify { + transport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} + } + + if len(globalConfiguration.RootCAs) > 0 { + transport.TLSClientConfig = &tls.Config{ + RootCAs: createRootCACertPool(globalConfiguration.RootCAs), + } + } + + err := http2.ConfigureTransport(transport) + if err != nil { + return nil, err + } + + return transport, nil +} + +func createRootCACertPool(rootCAs traefiktls.FilesOrContents) *x509.CertPool { + roots := x509.NewCertPool() + + for _, cert := range rootCAs { + certContent, err := cert.Read() + if err != nil { + log.WithoutContext().Error("Error while read RootCAs", err) + continue + } + roots.AppendCertsFromPEM(certContent) + } + + return roots +} diff --git a/server/router/route_appender_aggregator.go b/server/router/route_appender_aggregator.go new file mode 100644 index 000000000..3eebf4064 --- /dev/null +++ b/server/router/route_appender_aggregator.go @@ -0,0 +1,119 @@ +package router + +import ( + "context" + + "github.com/containous/alice" + "github.com/containous/mux" + "github.com/containous/traefik/api" + "github.com/containous/traefik/config/static" + "github.com/containous/traefik/log" + "github.com/containous/traefik/metrics" + "github.com/containous/traefik/safe" + "github.com/containous/traefik/types" +) + +// chainBuilder The contract of the middleware builder +type chainBuilder interface { + BuildChain(ctx context.Context, middlewares []string) (*alice.Chain, error) +} + +// NewRouteAppenderAggregator Creates a new RouteAppenderAggregator +func NewRouteAppenderAggregator(ctx context.Context, chainBuilder chainBuilder, conf static.Configuration, entryPointName string, currentConfiguration *safe.Safe) *RouteAppenderAggregator { + logger := log.FromContext(ctx) + + aggregator := &RouteAppenderAggregator{} + + // FIXME add REST + + if conf.API != nil && conf.API.EntryPoint == entryPointName { + chain, err := chainBuilder.BuildChain(ctx, conf.API.Middlewares) + if err != nil { + logger.Error(err) + } else { + aggregator.AddAppender(&WithMiddleware{ + appender: api.Handler{ + EntryPoint: conf.API.EntryPoint, + Dashboard: conf.API.Dashboard, + Statistics: conf.API.Statistics, + DashboardAssets: conf.API.DashboardAssets, + CurrentConfigurations: currentConfiguration, + }, + routerMiddlewares: chain, + }) + } + } + + if conf.Ping != nil && conf.Ping.EntryPoint == entryPointName { + chain, err := chainBuilder.BuildChain(ctx, conf.Ping.Middlewares) + if err != 
nil { + logger.Error(err) + } else { + aggregator.AddAppender(&WithMiddleware{ + appender: conf.Ping, + routerMiddlewares: chain, + }) + } + } + + if conf.Metrics != nil && conf.Metrics.Prometheus != nil && conf.Metrics.Prometheus.EntryPoint == entryPointName { + chain, err := chainBuilder.BuildChain(ctx, conf.Metrics.Prometheus.Middlewares) + if err != nil { + logger.Error(err) + } else { + aggregator.AddAppender(&WithMiddleware{ + appender: metrics.PrometheusHandler{}, + routerMiddlewares: chain, + }) + } + } + + return aggregator +} + +// RouteAppenderAggregator RouteAppender that aggregate other RouteAppender +type RouteAppenderAggregator struct { + appenders []types.RouteAppender +} + +// Append Adds routes to the router +func (r *RouteAppenderAggregator) Append(systemRouter *mux.Router) { + for _, router := range r.appenders { + router.Append(systemRouter) + } +} + +// AddAppender adds a router in the aggregator +func (r *RouteAppenderAggregator) AddAppender(router types.RouteAppender) { + r.appenders = append(r.appenders, router) +} + +// WithMiddleware router with internal middleware +type WithMiddleware struct { + appender types.RouteAppender + routerMiddlewares *alice.Chain +} + +// Append Adds routes to the router +func (wm *WithMiddleware) Append(systemRouter *mux.Router) { + realRouter := systemRouter.PathPrefix("/").Subrouter() + + wm.appender.Append(realRouter) + + if err := realRouter.Walk(wrapRoute(wm.routerMiddlewares)); err != nil { + log.WithoutContext().Error(err) + } +} + +// wrapRoute with middlewares +func wrapRoute(middlewares *alice.Chain) func(*mux.Route, *mux.Router, []*mux.Route) error { + return func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error { + handler, err := middlewares.Then(route.GetHandler()) + if err != nil { + return err + } + + route.Handler(handler) + return nil + } +} diff --git a/server/router/route_appender_aggregator_test.go b/server/router/route_appender_aggregator_test.go new file mode 100644 index 000000000..f35a131d2 --- /dev/null +++ b/server/router/route_appender_aggregator_test.go @@ -0,0 +1,116 @@ +package router + +import ( + "context" + "net/http" + "net/http/httptest" + "testing" + + "github.com/containous/alice" + "github.com/containous/mux" + "github.com/containous/traefik/config/static" + "github.com/containous/traefik/ping" + "github.com/stretchr/testify/assert" +) + +type ChainBuilderMock struct { + middles map[string]alice.Constructor +} + +func (c *ChainBuilderMock) BuildChain(ctx context.Context, middles []string) (*alice.Chain, error) { + chain := alice.New() + + for _, mName := range middles { + if constructor, ok := c.middles[mName]; ok { + chain = chain.Append(constructor) + } + } + + return &chain, nil +} + +func TestNewRouteAppenderAggregator(t *testing.T) { + testCases := []struct { + desc string + staticConf static.Configuration + middles map[string]alice.Constructor + expected map[string]int + }{ + { + desc: "API with auth, ping without auth", + staticConf: static.Configuration{ + API: &static.API{ + EntryPoint: "traefik", + Middlewares: []string{"dumb"}, + }, + Ping: &ping.Handler{ + EntryPoint: "traefik", + }, + EntryPoints: &static.EntryPoints{ + EntryPointList: map[string]static.EntryPoint{ + "traefik": {}, + }, + }, + }, + middles: map[string]alice.Constructor{ + "dumb": func(_ http.Handler) (http.Handler, error) { + return http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusUnauthorized) + }), nil + }, + }, + expected: map[string]int{ + "/wrong": 
http.StatusBadGateway, + "/ping": http.StatusOK, + //"/.well-known/acme-challenge/token": http.StatusNotFound, // FIXME + "/api/providers": http.StatusUnauthorized, + }, + }, + { + desc: "Wrong entrypoint name", + staticConf: static.Configuration{ + API: &static.API{ + EntryPoint: "no", + }, + EntryPoints: &static.EntryPoints{ + EntryPointList: map[string]static.EntryPoint{ + "traefik": {}, + }, + }, + }, + expected: map[string]int{ + "/api/providers": http.StatusBadGateway, + }, + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + chainBuilder := &ChainBuilderMock{middles: test.middles} + + ctx := context.Background() + + router := NewRouteAppenderAggregator(ctx, chainBuilder, test.staticConf, "traefik", nil) + + internalMuxRouter := mux.NewRouter() + router.Append(internalMuxRouter) + + internalMuxRouter.NotFoundHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusBadGateway) + }) + + actual := make(map[string]int) + for calledURL := range test.expected { + recorder := httptest.NewRecorder() + request := httptest.NewRequest(http.MethodGet, calledURL, nil) + internalMuxRouter.ServeHTTP(recorder, request) + actual[calledURL] = recorder.Code + } + + assert.Equal(t, test.expected, actual) + }) + } +} diff --git a/server/router/route_appender_factory.go b/server/router/route_appender_factory.go new file mode 100644 index 000000000..a9f209656 --- /dev/null +++ b/server/router/route_appender_factory.go @@ -0,0 +1,38 @@ +package router + +import ( + "context" + + "github.com/containous/traefik/config/static" + "github.com/containous/traefik/provider/acme" + "github.com/containous/traefik/safe" + "github.com/containous/traefik/server/middleware" + "github.com/containous/traefik/types" +) + +// NewRouteAppenderFactory Creates a new RouteAppenderFactory +func NewRouteAppenderFactory(staticConfiguration static.Configuration, entryPointName string, acmeProvider *acme.Provider) *RouteAppenderFactory { + return &RouteAppenderFactory{ + staticConfiguration: staticConfiguration, + entryPointName: entryPointName, + acmeProvider: acmeProvider, + } +} + +// RouteAppenderFactory A factory of RouteAppender +type RouteAppenderFactory struct { + staticConfiguration static.Configuration + entryPointName string + acmeProvider *acme.Provider +} + +// NewAppender Creates a new RouteAppender +func (r *RouteAppenderFactory) NewAppender(ctx context.Context, middlewaresBuilder *middleware.Builder, currentConfiguration *safe.Safe) types.RouteAppender { + aggregator := NewRouteAppenderAggregator(ctx, middlewaresBuilder, r.staticConfiguration, r.entryPointName, currentConfiguration) + + if r.acmeProvider != nil && r.acmeProvider.HTTPChallenge != nil && r.acmeProvider.HTTPChallenge.EntryPoint == r.entryPointName { + aggregator.AddAppender(r.acmeProvider) + } + + return aggregator +} diff --git a/server/router/router.go b/server/router/router.go new file mode 100644 index 000000000..0c707efe9 --- /dev/null +++ b/server/router/router.go @@ -0,0 +1,183 @@ +package router + +import ( + "context" + "fmt" + "net/http" + + "github.com/containous/alice" + "github.com/containous/mux" + "github.com/containous/traefik/config" + "github.com/containous/traefik/log" + "github.com/containous/traefik/middlewares/accesslog" + "github.com/containous/traefik/middlewares/recovery" + "github.com/containous/traefik/middlewares/tracing" + "github.com/containous/traefik/responsemodifiers" + 
"github.com/containous/traefik/server/middleware" + "github.com/containous/traefik/server/service" +) + +const ( + recoveryMiddlewareName = "traefik-internal-recovery" +) + +// NewManager Creates a new Manager +func NewManager(routers map[string]*config.Router, + serviceManager *service.Manager, middlewaresBuilder *middleware.Builder, modifierBuilder *responsemodifiers.Builder, +) *Manager { + return &Manager{ + routerHandlers: make(map[string]http.Handler), + configs: routers, + serviceManager: serviceManager, + middlewaresBuilder: middlewaresBuilder, + modifierBuilder: modifierBuilder, + } +} + +// Manager A route/router manager +type Manager struct { + routerHandlers map[string]http.Handler + configs map[string]*config.Router + serviceManager *service.Manager + middlewaresBuilder *middleware.Builder + modifierBuilder *responsemodifiers.Builder +} + +// BuildHandlers Builds handler for all entry points +func (m *Manager) BuildHandlers(rootCtx context.Context, entryPoints []string, defaultEntryPoints []string) map[string]http.Handler { + entryPointsRouters := m.filteredRouters(rootCtx, entryPoints, defaultEntryPoints) + + entryPointHandlers := make(map[string]http.Handler) + for entryPointName, routers := range entryPointsRouters { + ctx := log.With(rootCtx, log.Str(log.EntryPointName, entryPointName)) + + handler, err := m.buildEntryPointHandler(ctx, routers) + if err != nil { + log.FromContext(ctx).Error(err) + continue + } + entryPointHandlers[entryPointName] = handler + } + + m.serviceManager.LaunchHealthCheck() + + return entryPointHandlers +} + +func contains(entryPoints []string, entryPointName string) bool { + for _, name := range entryPoints { + if name == entryPointName { + return true + } + } + return false +} + +func (m *Manager) filteredRouters(ctx context.Context, entryPoints []string, defaultEntryPoints []string) map[string]map[string]*config.Router { + entryPointsRouters := make(map[string]map[string]*config.Router) + + for rtName, rt := range m.configs { + eps := rt.EntryPoints + if len(eps) == 0 { + eps = defaultEntryPoints + } + for _, entryPointName := range eps { + if !contains(entryPoints, entryPointName) { + log.FromContext(log.With(ctx, log.Str(log.EntryPointName, entryPointName))). + Errorf("entryPoint %q doesn't exist", entryPointName) + continue + } + + if _, ok := entryPointsRouters[entryPointName]; !ok { + entryPointsRouters[entryPointName] = make(map[string]*config.Router) + } + + entryPointsRouters[entryPointName][rtName] = rt + } + } + + return entryPointsRouters +} + +func (m *Manager) buildEntryPointHandler(ctx context.Context, configs map[string]*config.Router) (http.Handler, error) { + router := mux.NewRouter(). 
+ SkipClean(true) + + for routerName, routerConfig := range configs { + ctx = log.With(ctx, log.Str(log.RouterName, routerName)) + logger := log.FromContext(ctx) + + handler, err := m.buildRouterHandler(ctx, routerName) + if err != nil { + logger.Error(err) + continue + } + + err = addRoute(ctx, router, routerConfig.Rule, routerConfig.Priority, handler) + if err != nil { + logger.Error(err) + continue + } + } + + router.SortRoutes() + + chain := alice.New() + chain = chain.Append(func(next http.Handler) (http.Handler, error) { + return recovery.New(ctx, next, recoveryMiddlewareName) + }) + + return chain.Then(router) +} + +func (m *Manager) buildRouterHandler(ctx context.Context, routerName string) (http.Handler, error) { + if handler, ok := m.routerHandlers[routerName]; ok { + return handler, nil + } + + configRouter, ok := m.configs[routerName] + if !ok { + return nil, fmt.Errorf("no configuration for %s", routerName) + } + + handler, err := m.buildHandler(ctx, configRouter, routerName) + if err != nil { + return nil, err + } + + handlerWithAccessLog, err := alice.New(func(next http.Handler) (http.Handler, error) { + return accesslog.NewFieldHandler(next, accesslog.RouterName, routerName, nil), nil + }).Then(handler) + if err != nil { + log.FromContext(ctx).Error(err) + m.routerHandlers[routerName] = handler + } else { + m.routerHandlers[routerName] = handlerWithAccessLog + } + + return m.routerHandlers[routerName], nil +} + +func (m *Manager) buildHandler(ctx context.Context, router *config.Router, routerName string) (http.Handler, error) { + rm := m.modifierBuilder.Build(ctx, router.Middlewares) + + sHandler, err := m.serviceManager.Build(ctx, router.Service, rm) + if err != nil { + return nil, err + } + + mHandler, err := m.middlewaresBuilder.BuildChain(ctx, router.Middlewares) + if err != nil { + return nil, err + } + + alHandler := func(next http.Handler) (http.Handler, error) { + return accesslog.NewFieldHandler(next, accesslog.ServiceName, router.Service, accesslog.AddServiceFields), nil + } + + tHandler := func(next http.Handler) (http.Handler, error) { + return tracing.NewForwarder(ctx, routerName, router.Service, next), nil + } + + return alice.New().Append(alHandler).Extend(*mHandler).Append(tHandler).Then(sHandler) +} diff --git a/server/router/router_test.go b/server/router/router_test.go new file mode 100644 index 000000000..71d6f8a14 --- /dev/null +++ b/server/router/router_test.go @@ -0,0 +1,334 @@ +package router + +import ( + "context" + "net/http" + "net/http/httptest" + "testing" + + "github.com/containous/traefik/config" + "github.com/containous/traefik/middlewares/accesslog" + "github.com/containous/traefik/middlewares/requestdecorator" + "github.com/containous/traefik/responsemodifiers" + "github.com/containous/traefik/server/middleware" + "github.com/containous/traefik/server/service" + "github.com/containous/traefik/testhelpers" + "github.com/containous/traefik/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestRouterManager_Get(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})) + + type ExpectedResult struct { + StatusCode int + RequestHeaders map[string]string + } + + testCases := []struct { + desc string + routersConfig map[string]*config.Router + serviceConfig map[string]*config.Service + middlewaresConfig map[string]*config.Middleware + entryPoints []string + defaultEntryPoints []string + expected ExpectedResult + }{ + { + desc: "no middleware", + 
routersConfig: map[string]*config.Router{ + "foo": { + EntryPoints: []string{"web"}, + Service: "foo-service", + Rule: "Host:foo.bar", + }, + }, + serviceConfig: map[string]*config.Service{ + "foo-service": { + LoadBalancer: &config.LoadBalancerService{ + Servers: []config.Server{ + { + URL: server.URL, + Weight: 1, + }, + }, + Method: "wrr", + }, + }, + }, + entryPoints: []string{"web"}, + expected: ExpectedResult{StatusCode: http.StatusOK}, + }, + { + desc: "no middleware, default entry point", + routersConfig: map[string]*config.Router{ + "foo": { + Service: "foo-service", + Rule: "Host:foo.bar", + }, + }, + serviceConfig: map[string]*config.Service{ + "foo-service": { + LoadBalancer: &config.LoadBalancerService{ + Servers: []config.Server{ + { + URL: server.URL, + Weight: 1, + }, + }, + Method: "wrr", + }, + }, + }, + entryPoints: []string{"web"}, + defaultEntryPoints: []string{"web"}, + expected: ExpectedResult{StatusCode: http.StatusOK}, + }, + { + desc: "no middleware, no matching", + routersConfig: map[string]*config.Router{ + "foo": { + EntryPoints: []string{"web"}, + Service: "foo-service", + Rule: "Host:bar.bar", + }, + }, + serviceConfig: map[string]*config.Service{ + "foo-service": { + LoadBalancer: &config.LoadBalancerService{ + Servers: []config.Server{ + { + URL: server.URL, + Weight: 1, + }, + }, + Method: "wrr", + }, + }, + }, + entryPoints: []string{"web"}, + expected: ExpectedResult{StatusCode: http.StatusNotFound}, + }, + { + desc: "middleware: headers > auth", + routersConfig: map[string]*config.Router{ + "foo": { + EntryPoints: []string{"web"}, + Middlewares: []string{"headers-middle", "auth-middle"}, + Service: "foo-service", + Rule: "Host:foo.bar", + }, + }, + serviceConfig: map[string]*config.Service{ + "foo-service": { + LoadBalancer: &config.LoadBalancerService{ + Servers: []config.Server{ + { + URL: server.URL, + Weight: 1, + }, + }, + Method: "wrr", + }, + }, + }, + middlewaresConfig: map[string]*config.Middleware{ + "auth-middle": { + BasicAuth: &config.BasicAuth{ + Users: []string{"toto:titi"}, + }, + }, + "headers-middle": { + Headers: &config.Headers{ + CustomRequestHeaders: map[string]string{"X-Apero": "beer"}, + }, + }, + }, + entryPoints: []string{"web"}, + expected: ExpectedResult{ + StatusCode: http.StatusUnauthorized, + RequestHeaders: map[string]string{ + "X-Apero": "beer", + }, + }, + }, + { + desc: "middleware: auth > header", + routersConfig: map[string]*config.Router{ + "foo": { + EntryPoints: []string{"web"}, + Middlewares: []string{"auth-middle", "headers-middle"}, + Service: "foo-service", + Rule: "Host:foo.bar", + }, + }, + serviceConfig: map[string]*config.Service{ + "foo-service": { + LoadBalancer: &config.LoadBalancerService{ + Servers: []config.Server{ + { + URL: server.URL, + Weight: 1, + }, + }, + Method: "wrr", + }, + }, + }, + middlewaresConfig: map[string]*config.Middleware{ + "auth-middle": { + BasicAuth: &config.BasicAuth{ + Users: []string{"toto:titi"}, + }, + }, + "headers-middle": { + Headers: &config.Headers{ + CustomRequestHeaders: map[string]string{"X-Apero": "beer"}, + }, + }, + }, + entryPoints: []string{"web"}, + expected: ExpectedResult{ + StatusCode: http.StatusUnauthorized, + RequestHeaders: map[string]string{ + "X-Apero": "", + }, + }, + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + serviceManager := service.NewManager(test.serviceConfig, http.DefaultTransport) + middlewaresBuilder := middleware.NewBuilder(test.middlewaresConfig, serviceManager) + 
responseModifierFactory := responsemodifiers.NewBuilder(test.middlewaresConfig) + + routerManager := NewManager(test.routersConfig, serviceManager, middlewaresBuilder, responseModifierFactory) + + handlers := routerManager.BuildHandlers(context.Background(), test.entryPoints, test.defaultEntryPoints) + + w := httptest.NewRecorder() + req := testhelpers.MustNewRequest(http.MethodGet, "http://foo.bar/", nil) + + reqHost := requestdecorator.New(nil) + reqHost.ServeHTTP(w, req, handlers["web"].ServeHTTP) + + assert.Equal(t, test.expected.StatusCode, w.Code) + + for key, value := range test.expected.RequestHeaders { + assert.Equal(t, value, req.Header.Get(key)) + } + }) + } +} + +func TestAccessLog(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})) + + testCases := []struct { + desc string + routersConfig map[string]*config.Router + serviceConfig map[string]*config.Service + middlewaresConfig map[string]*config.Middleware + entryPoints []string + defaultEntryPoints []string + expected string + }{ + { + desc: "apply routerName in accesslog (first match)", + routersConfig: map[string]*config.Router{ + "foo": { + EntryPoints: []string{"web"}, + Service: "foo-service", + Rule: "Host:foo.bar", + }, + "bar": { + EntryPoints: []string{"web"}, + Service: "foo-service", + Rule: "Host:bar.foo", + }, + }, + serviceConfig: map[string]*config.Service{ + "foo-service": { + LoadBalancer: &config.LoadBalancerService{ + Servers: []config.Server{ + { + URL: server.URL, + Weight: 1, + }, + }, + Method: "wrr", + }, + }, + }, + entryPoints: []string{"web"}, + expected: "foo", + }, + { + desc: "apply routerName in accesslog (second match)", + routersConfig: map[string]*config.Router{ + "foo": { + EntryPoints: []string{"web"}, + Service: "foo-service", + Rule: "Host:bar.foo", + }, + "bar": { + EntryPoints: []string{"web"}, + Service: "foo-service", + Rule: "Host:foo.bar", + }, + }, + serviceConfig: map[string]*config.Service{ + "foo-service": { + LoadBalancer: &config.LoadBalancerService{ + Servers: []config.Server{ + { + URL: server.URL, + Weight: 1, + }, + }, + Method: "wrr", + }, + }, + }, + entryPoints: []string{"web"}, + expected: "bar", + }, + } + + for _, test := range testCases { + t.Run(test.desc, func(t *testing.T) { + + serviceManager := service.NewManager(test.serviceConfig, http.DefaultTransport) + middlewaresBuilder := middleware.NewBuilder(test.middlewaresConfig, serviceManager) + responseModifierFactory := responsemodifiers.NewBuilder(test.middlewaresConfig) + + routerManager := NewManager(test.routersConfig, serviceManager, middlewaresBuilder, responseModifierFactory) + + handlers := routerManager.BuildHandlers(context.Background(), test.entryPoints, test.defaultEntryPoints) + + w := httptest.NewRecorder() + req := testhelpers.MustNewRequest(http.MethodGet, "http://foo.bar/", nil) + + accesslogger, err := accesslog.NewHandler(&types.AccessLog{ + Format: "json", + }) + require.NoError(t, err) + + reqHost := requestdecorator.New(nil) + + accesslogger.ServeHTTP(w, req, http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + reqHost.ServeHTTP(w, req, handlers["web"].ServeHTTP) + + data := accesslog.GetLogData(req) + require.NotNil(t, data) + + assert.Equal(t, test.expected, data.Core[accesslog.RouterName]) + })) + }) + } +} diff --git a/server/router/rules.go b/server/router/rules.go new file mode 100644 index 000000000..fdda85103 --- /dev/null +++ b/server/router/rules.go @@ -0,0 +1,163 @@ +package router + +import ( + "context" 
+ "fmt" + "net/http" + "strings" + + "github.com/containous/mux" + "github.com/containous/traefik/log" + "github.com/containous/traefik/middlewares/requestdecorator" +) + +func addRoute(ctx context.Context, router *mux.Router, rule string, priority int, handler http.Handler) error { + matchers, err := parseRule(rule) + if err != nil { + return err + } + + if priority == 0 { + priority = len(rule) + } + + route := router.NewRoute().Handler(handler).Priority(priority) + for _, matcher := range matchers { + matcher(route) + if route.GetError() != nil { + log.FromContext(ctx).Error(route.GetError()) + } + } + + return nil +} + +func parseRule(rule string) ([]func(*mux.Route), error) { + funcs := map[string]func(*mux.Route, ...string){ + "Host": host, + "HostRegexp": hostRegexp, + "Path": path, + "PathPrefix": pathPrefix, + "Method": methods, + "Headers": headers, + "HeadersRegexp": headersRegexp, + "Query": query, + } + + splitRule := func(c rune) bool { + return c == ';' + } + parsedRules := strings.FieldsFunc(rule, splitRule) + + var matchers []func(*mux.Route) + + for _, expression := range parsedRules { + expParts := strings.Split(expression, ":") + if len(expParts) > 1 && len(expParts[1]) > 0 { + if fn, ok := funcs[expParts[0]]; ok { + + parseOr := func(c rune) bool { + return c == ',' + } + + exp := strings.FieldsFunc(strings.Join(expParts[1:], ":"), parseOr) + + var trimmedExp []string + for _, value := range exp { + trimmedExp = append(trimmedExp, strings.TrimSpace(value)) + } + + // FIXME struct for onhostrule ? + matcher := func(rt *mux.Route) { + fn(rt, trimmedExp...) + } + + matchers = append(matchers, matcher) + } else { + return nil, fmt.Errorf("invalid matcher: %s", expression) + } + } + } + + return matchers, nil +} + +func path(route *mux.Route, paths ...string) { + rt := route.Subrouter() + for _, path := range paths { + tmpRt := rt.Path(path) + if tmpRt.GetError() != nil { + log.WithoutContext().WithField("paths", strings.Join(paths, ",")).Error(tmpRt.GetError()) + } + } +} + +func pathPrefix(route *mux.Route, paths ...string) { + rt := route.Subrouter() + for _, path := range paths { + tmpRt := rt.PathPrefix(path) + if tmpRt.GetError() != nil { + log.WithoutContext().WithField("paths", strings.Join(paths, ",")).Error(tmpRt.GetError()) + } + } +} + +func host(route *mux.Route, hosts ...string) { + for i, host := range hosts { + hosts[i] = strings.ToLower(host) + } + + route.MatcherFunc(func(req *http.Request, route *mux.RouteMatch) bool { + reqHost := requestdecorator.GetCanonizedHost(req.Context()) + if len(reqHost) == 0 { + log.FromContext(req.Context()).Warnf("Could not retrieve CanonizedHost, rejecting %s", req.Host) + return false + } + + flatH := requestdecorator.GetCNAMEFlatten(req.Context()) + if len(flatH) > 0 { + for _, host := range hosts { + if strings.EqualFold(reqHost, host) || strings.EqualFold(flatH, host) { + return true + } + log.FromContext(req.Context()).Debugf("CNAMEFlattening: request %s which resolved to %s, is not matched to route %s", reqHost, flatH, host) + } + return false + } + + for _, host := range hosts { + if reqHost == host { + return true + } + } + return false + }) +} + +func hostRegexp(route *mux.Route, hosts ...string) { + router := route.Subrouter() + for _, host := range hosts { + router.Host(host) + } +} + +func methods(route *mux.Route, methods ...string) { + route.Methods(methods...) +} + +func headers(route *mux.Route, headers ...string) { + route.Headers(headers...) 
+} + +func headersRegexp(route *mux.Route, headers ...string) { + route.HeadersRegexp(headers...) +} + +func query(route *mux.Route, query ...string) { + var queries []string + for _, elem := range query { + queries = append(queries, strings.Split(elem, "=")...) + } + + route.Queries(queries...) +} diff --git a/server/router/rules_test.go b/server/router/rules_test.go new file mode 100644 index 000000000..49541d53a --- /dev/null +++ b/server/router/rules_test.go @@ -0,0 +1,442 @@ +package router + +import ( + "context" + "net/http" + "net/http/httptest" + "testing" + + "github.com/containous/mux" + "github.com/containous/traefik/middlewares/requestdecorator" + "github.com/containous/traefik/testhelpers" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_addRoute(t *testing.T) { + testCases := []struct { + desc string + rule string + headers map[string]string + expected map[string]int + }{ + { + desc: "no rule", + expected: map[string]int{ + "http://localhost/foo": http.StatusOK, + }, + }, + { + desc: "PathPrefix", + rule: "PathPrefix:/foo", + expected: map[string]int{ + "http://localhost/foo": http.StatusOK, + }, + }, + { + desc: "wrong PathPrefix", + rule: "PathPrefix:/bar", + expected: map[string]int{ + "http://localhost/foo": http.StatusNotFound, + }, + }, + { + desc: "Host", + rule: "Host:localhost", + expected: map[string]int{ + "http://localhost/foo": http.StatusOK, + }, + }, + { + desc: "wrong Host", + rule: "Host:nope", + expected: map[string]int{ + "http://localhost/foo": http.StatusNotFound, + }, + }, + { + desc: "Host and PathPrefix", + rule: "Host:localhost;PathPrefix:/foo", + expected: map[string]int{ + "http://localhost/foo": http.StatusOK, + }, + }, + { + desc: "Host and PathPrefix: wrong PathPrefix", + rule: "Host:localhost;PathPrefix:/bar", + expected: map[string]int{ + "http://localhost/foo": http.StatusNotFound, + }, + }, + { + desc: "Host and PathPrefix: wrong Host", + rule: "Host:nope;PathPrefix:/bar", + expected: map[string]int{ + "http://localhost/foo": http.StatusNotFound, + }, + }, + { + desc: "Host and PathPrefix: Host OR, first host", + rule: "Host:nope,localhost;PathPrefix:/foo", + expected: map[string]int{ + "http://localhost/foo": http.StatusOK, + }, + }, + { + desc: "Host and PathPrefix: Host OR, second host", + rule: "Host:nope,localhost;PathPrefix:/foo", + expected: map[string]int{ + "http://nope/foo": http.StatusOK, + }, + }, + { + desc: "Host and PathPrefix: Host OR, first host and wrong PathPrefix", + rule: "Host:nope,localhost;PathPrefix:/bar", + expected: map[string]int{ + "http://localhost/foo": http.StatusNotFound, + }, + }, + { + desc: "HostRegexp with capturing group", + rule: "HostRegexp: {subdomain:(foo\\.)?bar\\.com}", + expected: map[string]int{ + "http://foo.bar.com": http.StatusOK, + "http://bar.com": http.StatusOK, + "http://fooubar.com": http.StatusNotFound, + "http://barucom": http.StatusNotFound, + "http://barcom": http.StatusNotFound, + }, + }, + { + desc: "HostRegexp with non capturing group", + rule: "HostRegexp: {subdomain:(?:foo\\.)?bar\\.com}", + expected: map[string]int{ + "http://foo.bar.com": http.StatusOK, + "http://bar.com": http.StatusOK, + "http://fooubar.com": http.StatusNotFound, + "http://barucom": http.StatusNotFound, + "http://barcom": http.StatusNotFound, + }, + }, + { + desc: "Methods with GET", + rule: "Method: GET", + expected: map[string]int{ + "http://localhost/foo": http.StatusOK, + }, + }, + { + desc: "Methods with GET and POST", + rule: "Method: GET,POST", + expected: 
map[string]int{ + "http://localhost/foo": http.StatusOK, + }, + }, + { + desc: "Methods with POST", + rule: "Method: POST", + expected: map[string]int{ + "http://localhost/foo": http.StatusMethodNotAllowed, + }, + }, + { + desc: "Header with matching header", + rule: "Headers: Content-Type,application/json", + headers: map[string]string{ + "Content-Type": "application/json", + }, + expected: map[string]int{ + "http://localhost/foo": http.StatusOK, + }, + }, + { + desc: "Header without matching header", + rule: "Headers: Content-Type,application/foo", + headers: map[string]string{ + "Content-Type": "application/json", + }, + expected: map[string]int{ + "http://localhost/foo": http.StatusNotFound, + }, + }, + { + desc: "HeaderRegExp with matching header", + rule: "HeadersRegexp: Content-Type, application/(text|json)", + headers: map[string]string{ + "Content-Type": "application/json", + }, + expected: map[string]int{ + "http://localhost/foo": http.StatusOK, + }, + }, + { + desc: "HeaderRegExp without matching header", + rule: "HeadersRegexp: Content-Type, application/(text|json)", + headers: map[string]string{ + "Content-Type": "application/foo", + }, + expected: map[string]int{ + "http://localhost/foo": http.StatusNotFound, + }, + }, + { + desc: "HeaderRegExp with matching second header", + rule: "HeadersRegexp: Content-Type, application/(text|json)", + headers: map[string]string{ + "Content-Type": "application/text", + }, + expected: map[string]int{ + "http://localhost/foo": http.StatusOK, + }, + }, + { + desc: "Query with multiple params", + rule: "Query: foo=bar, bar=baz", + expected: map[string]int{ + "http://localhost/foo?foo=bar&bar=baz": http.StatusOK, + "http://localhost/foo?bar=baz": http.StatusNotFound, + }, + }, + { + desc: "Invalid rule syntax", + rule: "Query:param_one=true, /path2;Path: /path1", + expected: map[string]int{ + "http://localhost/foo?bar=baz": http.StatusNotFound, + }, + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}) + + router := mux.NewRouter() + router.SkipClean(true) + + err := addRoute(context.Background(), router, test.rule, 0, handler) + require.NoError(t, err) + + // RequestDecorator is necessary for the host rule + reqHost := requestdecorator.New(nil) + + results := make(map[string]int) + for calledURL := range test.expected { + w := httptest.NewRecorder() + + req := testhelpers.MustNewRequest(http.MethodGet, calledURL, nil) + for key, value := range test.headers { + req.Header.Set(key, value) + } + reqHost.ServeHTTP(w, req, router.ServeHTTP) + results[calledURL] = w.Code + } + assert.Equal(t, test.expected, results) + + }) + } +} + +func Test_addRoutePriority(t *testing.T) { + type Case struct { + xFrom string + rule string + priority int + } + + testCases := []struct { + desc string + path string + cases []Case + expected string + }{ + { + desc: "Higher priority on second rule", + path: "/my", + cases: []Case{ + { + xFrom: "header1", + rule: "PathPrefix:/my", + priority: 10, + }, + { + xFrom: "header2", + rule: "PathPrefix:/my", + priority: 20, + }, + }, + expected: "header2", + }, + { + desc: "Higher priority on first rule", + path: "/my", + cases: []Case{ + { + xFrom: "header1", + rule: "PathPrefix:/my", + priority: 20, + }, + { + xFrom: "header2", + rule: "PathPrefix:/my", + priority: 10, + }, + }, + expected: "header1", + }, + { + desc: "Higher priority on second rule with different rule", + path: 
"/mypath", + cases: []Case{ + { + xFrom: "header1", + rule: "PathPrefix:/mypath", + priority: 10, + }, + { + xFrom: "header2", + rule: "PathPrefix:/my", + priority: 20, + }, + }, + expected: "header2", + }, + { + desc: "Higher priority on longest rule (longest first)", + path: "/mypath", + cases: []Case{ + { + xFrom: "header1", + rule: "PathPrefix:/mypath", + }, + { + xFrom: "header2", + rule: "PathPrefix:/my", + }, + }, + expected: "header1", + }, + { + desc: "Higher priority on longest rule (longest second)", + path: "/mypath", + cases: []Case{ + { + xFrom: "header1", + rule: "PathPrefix:/my", + }, + { + xFrom: "header2", + rule: "PathPrefix:/mypath", + }, + }, + expected: "header2", + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + router := mux.NewRouter() + + for _, route := range test.cases { + route := route + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("X-From", route.xFrom) + }) + err := addRoute(context.Background(), router, route.rule, route.priority, handler) + require.NoError(t, err, route) + } + + router.SortRoutes() + + w := httptest.NewRecorder() + req := testhelpers.MustNewRequest(http.MethodGet, test.path, nil) + + router.ServeHTTP(w, req) + + assert.Equal(t, test.expected, w.Header().Get("X-From")) + }) + } +} + +func TestHostRegexp(t *testing.T) { + testCases := []struct { + desc string + hostExp string + urls map[string]bool + }{ + { + desc: "capturing group", + hostExp: "{subdomain:(foo\\.)?bar\\.com}", + urls: map[string]bool{ + "http://foo.bar.com": true, + "http://bar.com": true, + "http://fooubar.com": false, + "http://barucom": false, + "http://barcom": false, + }, + }, + { + desc: "non capturing group", + hostExp: "{subdomain:(?:foo\\.)?bar\\.com}", + urls: map[string]bool{ + "http://foo.bar.com": true, + "http://bar.com": true, + "http://fooubar.com": false, + "http://barucom": false, + "http://barcom": false, + }, + }, + { + desc: "regex insensitive", + hostExp: "{dummy:[A-Za-z-]+\\.bar\\.com}", + urls: map[string]bool{ + "http://FOO.bar.com": true, + "http://foo.bar.com": true, + "http://fooubar.com": false, + "http://barucom": false, + "http://barcom": false, + }, + }, + { + desc: "insensitive host", + hostExp: "{dummy:[a-z-]+\\.bar\\.com}", + urls: map[string]bool{ + "http://FOO.bar.com": true, + "http://foo.bar.com": true, + "http://fooubar.com": false, + "http://barucom": false, + "http://barcom": false, + }, + }, + { + desc: "insensitive host simple", + hostExp: "foo.bar.com", + urls: map[string]bool{ + "http://FOO.bar.com": true, + "http://foo.bar.com": true, + "http://fooubar.com": false, + "http://barucom": false, + "http://barcom": false, + }, + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + rt := &mux.Route{} + hostRegexp(rt, test.hostExp) + + for testURL, match := range test.urls { + req := testhelpers.MustNewRequest(http.MethodGet, testURL, nil) + assert.Equal(t, match, rt.Match(req, &mux.RouteMatch{}), testURL) + } + }) + } +} diff --git a/server/server.go b/server/server.go index f9027b692..ac11e4e16 100644 --- a/server/server.go +++ b/server/server.go @@ -9,35 +9,36 @@ import ( stdlog "log" "net" "net/http" - "net/http/httputil" - "net/url" "os" "os/signal" "sync" "time" "github.com/armon/go-proxyproto" - "github.com/containous/mux" "github.com/containous/traefik/cluster" - "github.com/containous/traefik/configuration" + "github.com/containous/traefik/config" + 
"github.com/containous/traefik/config/static" "github.com/containous/traefik/h2c" "github.com/containous/traefik/ip" "github.com/containous/traefik/log" "github.com/containous/traefik/metrics" "github.com/containous/traefik/middlewares" "github.com/containous/traefik/middlewares/accesslog" - "github.com/containous/traefik/middlewares/tracing" + "github.com/containous/traefik/middlewares/requestdecorator" + "github.com/containous/traefik/old/configuration" "github.com/containous/traefik/provider" "github.com/containous/traefik/safe" + "github.com/containous/traefik/server/middleware" traefiktls "github.com/containous/traefik/tls" + "github.com/containous/traefik/tracing" + "github.com/containous/traefik/tracing/datadog" + "github.com/containous/traefik/tracing/jaeger" + "github.com/containous/traefik/tracing/zipkin" "github.com/containous/traefik/types" "github.com/sirupsen/logrus" - "github.com/urfave/negroni" "github.com/xenolf/lego/acme" ) -var httpServerLogger = stdlog.New(log.WriterLevel(logrus.DebugLevel), "", 0) - func newHijackConnectionTracker() *hijackConnectionTracker { return &hijackConnectionTracker{ conns: make(map[net.Conn]struct{}), @@ -85,7 +86,7 @@ func (h *hijackConnectionTracker) Shutdown(ctx context.Context) error { func (h *hijackConnectionTracker) Close() { for conn := range h.conns { if err := conn.Close(); err != nil { - log.Errorf("Error while closing Hijacked conn: %v", err) + log.WithoutContext().Errorf("Error while closing Hijacked connection: %v", err) } delete(h.conns, conn) } @@ -93,33 +94,38 @@ func (h *hijackConnectionTracker) Close() { // Server is the reverse-proxy/load-balancer engine type Server struct { - serverEntryPoints serverEntryPoints - configurationChan chan types.ConfigMessage - configurationValidatedChan chan types.ConfigMessage - signals chan os.Signal - stopChan chan bool - currentConfigurations safe.Safe - providerConfigUpdateMap map[string]chan types.ConfigMessage - globalConfiguration configuration.GlobalConfiguration - accessLoggerMiddleware *accesslog.LogHandler - tracingMiddleware *tracing.Tracing - routinesPool *safe.Pool - leadership *cluster.Leadership - defaultForwardingRoundTripper http.RoundTripper - metricsRegistry metrics.Registry - provider provider.Provider - configurationListeners []func(types.Configuration) - entryPoints map[string]EntryPoint - bufferPool httputil.BufferPool + serverEntryPoints serverEntryPoints + configurationChan chan config.Message + configurationValidatedChan chan config.Message + signals chan os.Signal + stopChan chan bool + currentConfigurations safe.Safe + providerConfigUpdateMap map[string]chan config.Message + globalConfiguration configuration.GlobalConfiguration + accessLoggerMiddleware *accesslog.Handler + tracer *tracing.Tracing + routinesPool *safe.Pool + leadership *cluster.Leadership + defaultRoundTripper http.RoundTripper + metricsRegistry metrics.Registry + provider provider.Provider + configurationListeners []func(config.Configuration) + entryPoints map[string]EntryPoint + requestDecorator *requestdecorator.RequestDecorator +} + +// RouteAppenderFactory the route appender factory interface +type RouteAppenderFactory interface { + NewAppender(ctx context.Context, middlewaresBuilder *middleware.Builder, currentConfigurations *safe.Safe) types.RouteAppender } // EntryPoint entryPoint information (configuration + internalRouter) type EntryPoint struct { - InternalRouter types.InternalRouter - Configuration *configuration.EntryPoint - OnDemandListener func(string) (*tls.Certificate, error) - 
TLSALPNGetter func(string) (*tls.Certificate, error) - CertificateStore *traefiktls.CertificateStore + RouteAppenderFactory RouteAppenderFactory + Configuration *configuration.EntryPoint + OnDemandListener func(string) (*tls.Certificate, error) + TLSALPNGetter func(string) (*tls.Certificate, error) + CertificateStore *traefiktls.CertificateStore } type serverEntryPoints map[string]*serverEntryPoint @@ -142,10 +148,11 @@ func (s serverEntryPoint) Shutdown(ctx context.Context) { defer wg.Done() if err := s.httpServer.Shutdown(ctx); err != nil { if ctx.Err() == context.DeadlineExceeded { - log.Debugf("Wait server shutdown is over due to: %s", err) + logger := log.FromContext(ctx) + logger.Debugf("Wait server shutdown is over due to: %s", err) err = s.httpServer.Close() if err != nil { - log.Error(err) + logger.Error(err) } } } @@ -158,7 +165,8 @@ func (s serverEntryPoint) Shutdown(ctx context.Context) { defer wg.Done() if err := s.hijackConnectionTracker.Shutdown(ctx); err != nil { if ctx.Err() == context.DeadlineExceeded { - log.Debugf("Wait hijack connection is over due to: %s", err) + logger := log.FromContext(ctx) + logger.Debugf("Wait hijack connection is over due to: %s", err) s.hijackConnectionTracker.Close() } } @@ -179,11 +187,32 @@ func (ln tcpKeepAliveListener) Accept() (net.Conn, error) { if err != nil { return nil, err } - tc.SetKeepAlive(true) - tc.SetKeepAlivePeriod(3 * time.Minute) + + if err = tc.SetKeepAlive(true); err != nil { + return nil, err + } + + if err = tc.SetKeepAlivePeriod(3 * time.Minute); err != nil { + return nil, err + } + return tc, nil } +func setupTracing(conf *static.Tracing) tracing.TrackingBackend { + switch conf.Backend { + case jaeger.Name: + return conf.Jaeger + case zipkin.Name: + return conf.Zipkin + case datadog.Name: + return conf.DataDog + default: + log.WithoutContext().Warnf("Could not initialize tracing: unknown tracer %q", conf.Backend) + return nil + } +} + // NewServer returns an initialized Server. 
func NewServer(globalConfiguration configuration.GlobalConfiguration, provider provider.Provider, entrypoints map[string]EntryPoint) *Server { server := &Server{} @@ -192,36 +221,41 @@ func NewServer(globalConfiguration configuration.GlobalConfiguration, provider p server.provider = provider server.globalConfiguration = globalConfiguration server.serverEntryPoints = make(map[string]*serverEntryPoint) - server.configurationChan = make(chan types.ConfigMessage, 100) - server.configurationValidatedChan = make(chan types.ConfigMessage, 100) + server.configurationChan = make(chan config.Message, 100) + server.configurationValidatedChan = make(chan config.Message, 100) server.signals = make(chan os.Signal, 1) server.stopChan = make(chan bool, 1) server.configureSignals() - currentConfigurations := make(types.Configurations) + currentConfigurations := make(config.Configurations) server.currentConfigurations.Set(currentConfigurations) - server.providerConfigUpdateMap = make(map[string]chan types.ConfigMessage) + server.providerConfigUpdateMap = make(map[string]chan config.Message) + + transport, err := createHTTPTransport(globalConfiguration) + if err != nil { + log.WithoutContext().Error(err) + server.defaultRoundTripper = http.DefaultTransport + } else { + server.defaultRoundTripper = transport + } if server.globalConfiguration.API != nil { server.globalConfiguration.API.CurrentConfigurations = &server.currentConfigurations } - server.bufferPool = newBufferPool() - server.routinesPool = safe.NewPool(context.Background()) - transport, err := createHTTPTransport(globalConfiguration) - if err != nil { - log.Errorf("failed to create HTTP transport: %v", err) + if globalConfiguration.Tracing != nil { + trackingBackend := setupTracing(static.ConvertTracing(globalConfiguration.Tracing)) + var err error + server.tracer, err = tracing.NewTracing(globalConfiguration.Tracing.ServiceName, globalConfiguration.Tracing.SpanNameLimit, trackingBackend) + if err != nil { + log.WithoutContext().Warnf("Unable to create tracer: %v", err) + } } - server.defaultForwardingRoundTripper = transport + server.requestDecorator = requestdecorator.New(static.ConvertHostResolverConfig(globalConfiguration.HostResolver)) - server.tracingMiddleware = globalConfiguration.Tracing - if server.tracingMiddleware != nil && server.tracingMiddleware.Backend != "" { - server.tracingMiddleware.Setup() - } - - server.metricsRegistry = registerMetricClients(globalConfiguration.Metrics) + server.metricsRegistry = registerMetricClients(static.ConvertMetrics(globalConfiguration.Metrics)) if globalConfiguration.Cluster != nil { // leadership creation if cluster mode @@ -230,9 +264,9 @@ func NewServer(globalConfiguration configuration.GlobalConfiguration, provider p if globalConfiguration.AccessLog != nil { var err error - server.accessLoggerMiddleware, err = accesslog.NewLogHandler(globalConfiguration.AccessLog) + server.accessLoggerMiddleware, err = accesslog.NewHandler(static.ConvertAccessLog(globalConfiguration.AccessLog)) if err != nil { - log.Warnf("Unable to create log handler: %s", err) + log.WithoutContext().Warnf("Unable to create access logger : %v", err) } } return server @@ -259,13 +293,16 @@ func (s *Server) StartWithContext(ctx context.Context) { go func() { defer s.Close() <-ctx.Done() - log.Info("I have to go...") + logger := log.FromContext(ctx) + logger.Info("I have to go...") + reqAcceptGraceTimeOut := time.Duration(s.globalConfiguration.LifeCycle.RequestAcceptGraceTimeout) if reqAcceptGraceTimeOut > 0 { - log.Infof("Waiting 
%s for incoming requests to cease", reqAcceptGraceTimeOut) + logger.Infof("Waiting %s for incoming requests to cease", reqAcceptGraceTimeOut) time.Sleep(reqAcceptGraceTimeOut) } - log.Info("Stopping server gracefully") + + logger.Info("Stopping server gracefully") s.Stop() }() s.Start() @@ -278,18 +315,23 @@ func (s *Server) Wait() { // Stop stops the server func (s *Server) Stop() { - defer log.Info("Server stopped") + defer log.WithoutContext().Info("Server stopped") + var wg sync.WaitGroup for sepn, sep := range s.serverEntryPoints { wg.Add(1) go func(serverEntryPointName string, serverEntryPoint *serverEntryPoint) { defer wg.Done() + logger := log.WithoutContext().WithField(log.EntryPointName, serverEntryPointName) + graceTimeOut := time.Duration(s.globalConfiguration.LifeCycle.GraceTimeOut) ctx, cancel := context.WithTimeout(context.Background(), graceTimeOut) - log.Debugf("Waiting %s seconds before killing connections on entrypoint %s...", graceTimeOut, serverEntryPointName) + logger.Debugf("Waiting %s seconds before killing connections on entrypoint %s...", graceTimeOut, serverEntryPointName) + serverEntryPoint.Shutdown(ctx) cancel() - log.Debugf("Entrypoint %s closed", serverEntryPointName) + + logger.Debugf("Entry point %s closed", serverEntryPointName) }(sepn, sep) } wg.Wait() @@ -307,6 +349,7 @@ func (s *Server) Close() { panic("Timeout while stopping traefik, killing instance ✝") } }(ctx) + stopMetricsClients() s.stopLeadership() s.routinesPool.Cleanup() @@ -315,11 +358,17 @@ func (s *Server) Close() { signal.Stop(s.signals) close(s.signals) close(s.stopChan) + if s.accessLoggerMiddleware != nil { if err := s.accessLoggerMiddleware.Close(); err != nil { - log.Errorf("Error closing access log file: %s", err) + log.WithoutContext().Errorf("Could not close the access log file: %s", err) } } + + if s.tracer != nil { + s.tracer.Close() + } + cancel() } @@ -339,8 +388,9 @@ func (s *Server) startHTTPServers() { s.serverEntryPoints = s.buildServerEntryPoints() for newServerEntryPointName, newServerEntryPoint := range s.serverEntryPoints { - serverEntryPoint := s.setupServerEntryPoint(newServerEntryPointName, newServerEntryPoint) - go s.startServer(serverEntryPoint) + ctx := log.With(context.Background(), log.Str(log.EntryPointName, newServerEntryPointName)) + serverEntryPoint := s.setupServerEntryPoint(ctx, newServerEntryPointName, newServerEntryPoint) + go s.startServer(ctx, serverEntryPoint) } } @@ -359,9 +409,9 @@ func (s *Server) listenProviders(stop chan bool) { } // AddListener adds a new listener function used when new configuration is provided -func (s *Server) AddListener(listener func(types.Configuration)) { +func (s *Server) AddListener(listener func(config.Configuration)) { if s.configurationListeners == nil { - s.configurationListeners = make([]func(types.Configuration), 0) + s.configurationListeners = make([]func(config.Configuration), 0) } s.configurationListeners = append(s.configurationListeners, listener) } @@ -395,22 +445,23 @@ func (s *serverEntryPoint) getCertificate(clientHello *tls.ClientHelloInfo) (*tl return nil, fmt.Errorf("strict SNI enabled - No certificate found for domain: %q, closing connection", domainToCheck) } - log.Debugf("Serving default cert for request: %q", domainToCheck) + log.WithoutContext().Debugf("Serving default certificate for request: %q", domainToCheck) return s.certs.DefaultCertificate, nil } func (s *Server) startProvider() { - // start providers jsonConf, err := json.Marshal(s.provider) if err != nil { - log.Debugf("Unable to marshal 
provider conf %T with error: %v", s.provider, err) + log.WithoutContext().Debugf("Unable to marshal provider configuration %T: %v", s.provider, err) } - log.Infof("Starting provider %T %s", s.provider, jsonConf) + + log.WithoutContext().Infof("Starting provider %T %s", s.provider, jsonConf) currentProvider := s.provider + safe.Go(func() { err := currentProvider.Provide(s.configurationChan, s.routinesPool) if err != nil { - log.Errorf("Error starting provider %T: %s", s.provider, err) + log.WithoutContext().Errorf("Error starting provider %T: %s", s.provider, err) } }) } @@ -421,7 +472,7 @@ func (s *Server) createTLSConfig(entryPointName string, tlsOption *traefiktls.TL return nil, nil } - config, err := tlsOption.Certificates.CreateTLSConfig(entryPointName) + conf, err := tlsOption.Certificates.CreateTLSConfig(entryPointName) if err != nil { return nil, err } @@ -429,7 +480,7 @@ func (s *Server) createTLSConfig(entryPointName string, tlsOption *traefiktls.TL s.serverEntryPoints[entryPointName].certs.DynamicCerts.Set(make(map[string]*tls.Certificate)) // ensure http2 enabled - config.NextProtos = []string{"h2", "http/1.1", acme.ACMETLS1Protocol} + conf.NextProtos = []string{"h2", "http/1.1", acme.ACMETLS1Protocol} if len(tlsOption.ClientCA.Files) > 0 { pool := x509.NewCertPool() @@ -443,55 +494,59 @@ func (s *Server) createTLSConfig(entryPointName string, tlsOption *traefiktls.TL return nil, fmt.Errorf("invalid certificate(s) in %s", caFile) } } - config.ClientCAs = pool + conf.ClientCAs = pool if tlsOption.ClientCA.Optional { - config.ClientAuth = tls.VerifyClientCertIfGiven + conf.ClientAuth = tls.VerifyClientCertIfGiven } else { - config.ClientAuth = tls.RequireAndVerifyClientCert + conf.ClientAuth = tls.RequireAndVerifyClientCert } } - if s.globalConfiguration.ACME != nil && entryPointName == s.globalConfiguration.ACME.EntryPoint { - checkOnDemandDomain := func(domain string) bool { - routeMatch := &mux.RouteMatch{} - match := router.GetHandler().Match(&http.Request{URL: &url.URL{}, Host: domain}, routeMatch) - if match && routeMatch.Route != nil { - return true - } - return false - } - - err := s.globalConfiguration.ACME.CreateClusterConfig(s.leadership, config, s.serverEntryPoints[entryPointName].certs.DynamicCerts, checkOnDemandDomain) - if err != nil { - return nil, err - } + // FIXME onDemand + if s.globalConfiguration.ACME != nil { + // if entryPointName == s.globalConfiguration.ACME.EntryPoint { + // checkOnDemandDomain := func(domain string) bool { + // routeMatch := &mux.RouteMatch{} + // match := router.GetHandler().Match(&http.Request{URL: &url.URL{}, Host: domain}, routeMatch) + // if match && routeMatch.Route != nil { + // return true + // } + // return false + // } + // + // err := s.globalConfiguration.ACME.CreateClusterConfig(s.leadership, config, s.serverEntryPoints[entryPointName].certs.DynamicCerts, checkOnDemandDomain) + // if err != nil { + // return nil, err + // } + // } } else { - config.GetCertificate = s.serverEntryPoints[entryPointName].getCertificate - if len(config.Certificates) != 0 { - certMap := s.buildNameOrIPToCertificate(config.Certificates) - - if s.entryPoints[entryPointName].CertificateStore != nil { - s.entryPoints[entryPointName].CertificateStore.StaticCerts.Set(certMap) - } - } - - // Remove certs from the TLS config object - config.Certificates = []tls.Certificate{} + conf.GetCertificate = s.serverEntryPoints[entryPointName].getCertificate } + if len(conf.Certificates) != 0 { + certMap := s.buildNameOrIPToCertificate(conf.Certificates) + + if 
s.entryPoints[entryPointName].CertificateStore != nil {
+            s.entryPoints[entryPointName].CertificateStore.StaticCerts.Set(certMap)
+        }
+    }
+
+    // Remove certs from the TLS config object
+    conf.Certificates = []tls.Certificate{}
+
     // Set the minimum TLS version if set in the config TOML
-    if minConst, exists := traefiktls.MinVersion[s.entryPoints[entryPointName].Configuration.TLS.MinVersion]; exists {
-        config.PreferServerCipherSuites = true
-        config.MinVersion = minConst
+    if minConst, exists := traefiktls.MinVersion[tlsOption.MinVersion]; exists {
+        conf.PreferServerCipherSuites = true
+        conf.MinVersion = minConst
     }
     // Set the list of CipherSuites if set in the config TOML
-    if s.entryPoints[entryPointName].Configuration.TLS.CipherSuites != nil {
-        // if our list of CipherSuites is defined in the entrypoint config, we can re-initilize the suites list as empty
-        config.CipherSuites = make([]uint16, 0)
-        for _, cipher := range s.entryPoints[entryPointName].Configuration.TLS.CipherSuites {
+    if tlsOption.CipherSuites != nil {
+        // if our list of CipherSuites is defined in the entryPoint config, we can re-initialize the suites list as empty
+        conf.CipherSuites = make([]uint16, 0)
+        for _, cipher := range tlsOption.CipherSuites {
             if cipherConst, exists := traefiktls.CipherSuites[cipher]; exists {
-                config.CipherSuites = append(config.CipherSuites, cipherConst)
+                conf.CipherSuites = append(conf.CipherSuites, cipherConst)
             } else {
                 // CipherSuite listed in the toml does not exist in our listed
                 return nil, fmt.Errorf("invalid CipherSuite: %s", cipher)
@@ -499,11 +554,12 @@ func (s *Server) createTLSConfig(entryPointName string, tlsOption *traefiktls.TL
         }
     }
-    return config, nil
+    return conf, nil
 }
-func (s *Server) startServer(serverEntryPoint *serverEntryPoint) {
-    log.Infof("Starting server on %s", serverEntryPoint.httpServer.Addr)
+func (s *Server) startServer(ctx context.Context, serverEntryPoint *serverEntryPoint) {
+    logger := log.FromContext(ctx)
+    logger.Infof("Starting server on %s", serverEntryPoint.httpServer.Addr)
     var err error
     if serverEntryPoint.httpServer.TLSConfig != nil {
@@ -513,19 +569,14 @@
     }
     if err != http.ErrServerClosed {
-        log.Error("Error creating server: ", err)
+        logger.Errorf("Cannot create server: %v", err)
     }
 }
-func (s *Server) setupServerEntryPoint(newServerEntryPointName string, newServerEntryPoint *serverEntryPoint) *serverEntryPoint {
-    serverMiddlewares, err := s.buildServerEntryPointMiddlewares(newServerEntryPointName)
+func (s *Server) setupServerEntryPoint(ctx context.Context, newServerEntryPointName string, newServerEntryPoint *serverEntryPoint) *serverEntryPoint {
+    newSrv, listener, err := s.prepareServer(ctx, newServerEntryPointName, s.entryPoints[newServerEntryPointName].Configuration, newServerEntryPoint.httpRouter)
     if err != nil {
-        log.Fatal("Error preparing server: ", err)
-    }
-
-    newSrv, listener, err := s.prepareServer(newServerEntryPointName, s.entryPoints[newServerEntryPointName].Configuration, newServerEntryPoint.httpRouter, serverMiddlewares)
-    if err != nil {
-        log.Fatal("Error preparing server: ", err)
+        log.FromContext(ctx).Fatalf("Error preparing server: %v", err)
     }
     serverEntryPoint := s.serverEntryPoints[newServerEntryPointName]
@@ -545,19 +596,15 @@ func (s *Server) setupServerEntryPoint(newServerEntryPointName string, newServer
     return serverEntryPoint
 }
-func (s *Server) prepareServer(entryPointName string, entryPoint *configuration.EntryPoint, router
*middlewares.HandlerSwitcher, middlewares []negroni.Handler) (*h2c.Server, net.Listener, error) { +func (s *Server) prepareServer(ctx context.Context, entryPointName string, entryPoint *configuration.EntryPoint, router *middlewares.HandlerSwitcher) (*h2c.Server, net.Listener, error) { + logger := log.FromContext(ctx) + readTimeout, writeTimeout, idleTimeout := buildServerTimeouts(s.globalConfiguration) - log.Infof("Preparing server %s %+v with readTimeout=%s writeTimeout=%s idleTimeout=%s", entryPointName, entryPoint, readTimeout, writeTimeout, idleTimeout) - - // middlewares - n := negroni.New() - for _, middleware := range middlewares { - n.Use(middleware) - } - n.UseHandler(router) - - internalMuxRouter := s.buildInternalRouter(entryPointName) - internalMuxRouter.NotFoundHandler = n + logger. + WithField("readTimeout", readTimeout). + WithField("writeTimeout", writeTimeout). + WithField("idleTimeout", idleTimeout). + Infof("Preparing server %+v", entryPoint) tlsConfig, err := s.createTLSConfig(entryPointName, entryPoint.TLS, router) if err != nil { @@ -572,16 +619,18 @@ func (s *Server) prepareServer(entryPointName string, entryPoint *configuration. listener = tcpKeepAliveListener{listener.(*net.TCPListener)} if entryPoint.ProxyProtocol != nil { - listener, err = buildProxyProtocolListener(entryPoint, listener) + listener, err = buildProxyProtocolListener(ctx, entryPoint, listener) if err != nil { return nil, nil, fmt.Errorf("error creating proxy protocol listener: %v", err) } } + httpServerLogger := stdlog.New(logger.WriterLevel(logrus.DebugLevel), "", 0) + return &h2c.Server{ Server: &http.Server{ Addr: entryPoint.Address, - Handler: internalMuxRouter, + Handler: router, TLSConfig: tlsConfig, ReadTimeout: readTimeout, WriteTimeout: writeTimeout, @@ -593,7 +642,7 @@ func (s *Server) prepareServer(entryPointName string, entryPoint *configuration. 
nil } -func buildProxyProtocolListener(entryPoint *configuration.EntryPoint, listener net.Listener) (net.Listener, error) { +func buildProxyProtocolListener(ctx context.Context, entryPoint *configuration.EntryPoint, listener net.Listener) (net.Listener, error) { var sourceCheck func(addr net.Addr) (bool, error) if entryPoint.ProxyProtocol.Insecure { sourceCheck = func(_ net.Addr) (bool, error) { @@ -615,7 +664,7 @@ func buildProxyProtocolListener(entryPoint *configuration.EntryPoint, listener n } } - log.Infof("Enabling ProxyProtocol for trusted IPs %v", entryPoint.ProxyProtocol.TrustedIPs) + log.FromContext(ctx).Infof("Enabling ProxyProtocol for trusted IPs %v", entryPoint.ProxyProtocol.TrustedIPs) return &proxyproto.Listener{ Listener: listener, @@ -623,23 +672,6 @@ func buildProxyProtocolListener(entryPoint *configuration.EntryPoint, listener n }, nil } -func (s *Server) buildInternalRouter(entryPointName string) *mux.Router { - internalMuxRouter := mux.NewRouter() - internalMuxRouter.StrictSlash(!s.globalConfiguration.KeepTrailingSlash) - internalMuxRouter.SkipClean(true) - - if entryPoint, ok := s.entryPoints[entryPointName]; ok && entryPoint.InternalRouter != nil { - entryPoint.InternalRouter.AddRoutes(internalMuxRouter) - - if s.globalConfiguration.API != nil && s.globalConfiguration.API.EntryPoint == entryPointName && s.leadership != nil { - s.leadership.AddRoutes(internalMuxRouter) - - } - } - - return internalMuxRouter -} - func buildServerTimeouts(globalConfig configuration.GlobalConfiguration) (readTimeout, writeTimeout, idleTimeout time.Duration) { readTimeout = time.Duration(0) writeTimeout = time.Duration(0) @@ -663,24 +695,35 @@ func registerMetricClients(metricsConfig *types.Metrics) metrics.Registry { } var registries []metrics.Registry + if metricsConfig.Prometheus != nil { - prometheusRegister := metrics.RegisterPrometheus(metricsConfig.Prometheus) + ctx := log.With(context.Background(), log.Str(log.MetricsProviderName, "prometheus")) + prometheusRegister := metrics.RegisterPrometheus(ctx, metricsConfig.Prometheus) if prometheusRegister != nil { registries = append(registries, prometheusRegister) - log.Debug("Configured Prometheus metrics") + log.FromContext(ctx).Debug("Configured Prometheus metrics") } } + if metricsConfig.Datadog != nil { - registries = append(registries, metrics.RegisterDatadog(metricsConfig.Datadog)) - log.Debugf("Configured DataDog metrics pushing to %s once every %s", metricsConfig.Datadog.Address, metricsConfig.Datadog.PushInterval) + ctx := log.With(context.Background(), log.Str(log.MetricsProviderName, "datadog")) + registries = append(registries, metrics.RegisterDatadog(ctx, metricsConfig.Datadog)) + log.FromContext(ctx).Debugf("Configured DataDog metrics: pushing to %s once every %s", + metricsConfig.Datadog.Address, metricsConfig.Datadog.PushInterval) } + if metricsConfig.StatsD != nil { - registries = append(registries, metrics.RegisterStatsd(metricsConfig.StatsD)) - log.Debugf("Configured StatsD metrics pushing to %s once every %s", metricsConfig.StatsD.Address, metricsConfig.StatsD.PushInterval) + ctx := log.With(context.Background(), log.Str(log.MetricsProviderName, "statsd")) + registries = append(registries, metrics.RegisterStatsd(ctx, metricsConfig.StatsD)) + log.FromContext(ctx).Debugf("Configured StatsD metrics: pushing to %s once every %s", + metricsConfig.StatsD.Address, metricsConfig.StatsD.PushInterval) } + if metricsConfig.InfluxDB != nil { - registries = append(registries, metrics.RegisterInfluxDB(metricsConfig.InfluxDB)) - 
log.Debugf("Configured InfluxDB metrics pushing to %s once every %s", metricsConfig.InfluxDB.Address, metricsConfig.InfluxDB.PushInterval) + ctx := log.With(context.Background(), log.Str(log.MetricsProviderName, "influxdb")) + registries = append(registries, metrics.RegisterInfluxDB(ctx, metricsConfig.InfluxDB)) + log.FromContext(ctx).Debugf("Configured InfluxDB metrics: pushing to %s once every %s", + metricsConfig.InfluxDB.Address, metricsConfig.InfluxDB.PushInterval) } return metrics.NewMultiRegistry(registries) diff --git a/server/server_configuration.go b/server/server_configuration.go index ac46743a9..1ded2da1a 100644 --- a/server/server_configuration.go +++ b/server/server_configuration.go @@ -1,41 +1,42 @@ package server import ( + "context" "crypto/tls" "encoding/json" "fmt" - "net" "net/http" "reflect" - "sort" - "strings" "time" - "github.com/containous/flaeg/parse" + "github.com/containous/alice" "github.com/containous/mux" - "github.com/containous/traefik/configuration" - "github.com/containous/traefik/healthcheck" - "github.com/containous/traefik/hostresolver" + "github.com/containous/traefik/config" + "github.com/containous/traefik/config/static" "github.com/containous/traefik/log" - "github.com/containous/traefik/metrics" "github.com/containous/traefik/middlewares" - "github.com/containous/traefik/middlewares/pipelining" - "github.com/containous/traefik/rules" + "github.com/containous/traefik/middlewares/accesslog" + "github.com/containous/traefik/middlewares/requestdecorator" + "github.com/containous/traefik/middlewares/tracing" + "github.com/containous/traefik/old/configuration" + "github.com/containous/traefik/responsemodifiers" + "github.com/containous/traefik/server/middleware" + "github.com/containous/traefik/server/router" + "github.com/containous/traefik/server/service" traefiktls "github.com/containous/traefik/tls" "github.com/containous/traefik/tls/generate" - "github.com/containous/traefik/types" "github.com/eapache/channels" "github.com/sirupsen/logrus" - "github.com/urfave/negroni" - "github.com/vulcand/oxy/forward" ) // loadConfiguration manages dynamically frontends, backends and TLS configurations -func (s *Server) loadConfiguration(configMsg types.ConfigMessage) { - currentConfigurations := s.currentConfigurations.Get().(types.Configurations) +func (s *Server) loadConfiguration(configMsg config.Message) { + logger := log.FromContext(log.With(context.Background(), log.Str(log.ProviderName, configMsg.ProviderName))) + + currentConfigurations := s.currentConfigurations.Get().(config.Configurations) // Copy configurations to new map so we don't change current if LoadConfig fails - newConfigurations := make(types.Configurations) + newConfigurations := make(config.Configurations) for k, v := range currentConfigurations { newConfigurations[k] = v } @@ -43,22 +44,25 @@ func (s *Server) loadConfiguration(configMsg types.ConfigMessage) { s.metricsRegistry.ConfigReloadsCounter().Add(1) - newServerEntryPoints := s.loadConfig(newConfigurations, s.globalConfiguration) + handlers, certificates := s.loadConfig(newConfigurations, s.globalConfiguration) s.metricsRegistry.LastConfigReloadSuccessGauge().Set(float64(time.Now().Unix())) - for newServerEntryPointName, newServerEntryPoint := range newServerEntryPoints { - s.serverEntryPoints[newServerEntryPointName].httpRouter.UpdateHandler(newServerEntryPoint.httpRouter.GetHandler()) + for entryPointName, handler := range handlers { + s.serverEntryPoints[entryPointName].httpRouter.UpdateHandler(handler) + } - if 
s.entryPoints[newServerEntryPointName].Configuration.TLS == nil { - if newServerEntryPoint.certs.ContainsCertificates() { - log.Debugf("Certificates not added to non-TLS entryPoint %s.", newServerEntryPointName) + for entryPointName, serverEntryPoint := range s.serverEntryPoints { + eLogger := logger.WithField(log.EntryPointName, entryPointName) + if s.entryPoints[entryPointName].Configuration.TLS == nil { + if len(certificates[entryPointName]) > 0 { + eLogger.Debugf("Cannot configure certificates for the non-TLS %s entryPoint.", entryPointName) } } else { - s.serverEntryPoints[newServerEntryPointName].certs.DynamicCerts.Set(newServerEntryPoint.certs.DynamicCerts.Get()) - s.serverEntryPoints[newServerEntryPointName].certs.ResetCache() + serverEntryPoint.certs.DynamicCerts.Set(certificates[entryPointName]) + serverEntryPoint.certs.ResetCache() } - log.Infof("Server configuration reloaded on %s", s.serverEntryPoints[newServerEntryPointName].httpServer.Addr) + eLogger.Infof("Server configuration reloaded on %s", s.serverEntryPoints[entryPointName].httpServer.Addr) } s.currentConfigurations.Set(newConfigurations) @@ -72,251 +76,127 @@ func (s *Server) loadConfiguration(configMsg types.ConfigMessage) { // loadConfig returns a new gorilla.mux Route from the specified global configuration and the dynamic // provider configurations. -func (s *Server) loadConfig(configurations types.Configurations, globalConfiguration configuration.GlobalConfiguration) map[string]*serverEntryPoint { +func (s *Server) loadConfig(configurations config.Configurations, globalConfiguration configuration.GlobalConfiguration) (map[string]http.Handler, map[string]map[string]*tls.Certificate) { - serverEntryPoints := s.buildServerEntryPoints() + ctx := context.TODO() - backendsHandlers := map[string]http.Handler{} - backendsHealthCheck := map[string]*healthcheck.BackendConfig{} - - var postConfigs []handlerPostConfig - - for providerName, config := range configurations { - frontendNames := sortedFrontendNamesForConfig(config) - - for _, frontendName := range frontendNames { - frontendPostConfigs, err := s.loadFrontendConfig(providerName, frontendName, config, - serverEntryPoints, - backendsHandlers, backendsHealthCheck) - if err != nil { - log.Errorf("%v. Skipping frontend %s...", err, frontendName) - } - - if len(frontendPostConfigs) > 0 { - postConfigs = append(postConfigs, frontendPostConfigs...) - } + // FIXME manage duplicates + conf := config.Configuration{ + Routers: make(map[string]*config.Router), + Middlewares: make(map[string]*config.Middleware), + Services: make(map[string]*config.Service), + } + for _, config := range configurations { + for key, value := range config.Middlewares { + conf.Middlewares[key] = value } + + for key, value := range config.Services { + conf.Services[key] = value + } + + for key, value := range config.Routers { + conf.Routers[key] = value + } + + conf.TLS = append(conf.TLS, config.TLS...) 
} - for _, postConfig := range postConfigs { - err := postConfig(backendsHandlers) - if err != nil { - log.Errorf("middleware post configuration error: %v", err) - } - } + handlers := s.applyConfiguration(ctx, conf) - healthcheck.GetHealthCheck(s.metricsRegistry).SetBackendsConfiguration(s.routinesPool.Ctx(), backendsHealthCheck) - - // Get new certificates list sorted per entrypoints + // Get new certificates list sorted per entry points // Update certificates entryPointsCertificates := s.loadHTTPSConfiguration(configurations, globalConfiguration.DefaultEntryPoints) - // Sort routes and update certificates - for serverEntryPointName, serverEntryPoint := range serverEntryPoints { - serverEntryPoint.httpRouter.GetHandler().SortRoutes() - if _, exists := entryPointsCertificates[serverEntryPointName]; exists { - serverEntryPoint.certs.DynamicCerts.Set(entryPointsCertificates[serverEntryPointName]) - } - } - - return serverEntryPoints + return handlers, entryPointsCertificates } -func (s *Server) loadFrontendConfig( - providerName string, frontendName string, config *types.Configuration, - serverEntryPoints map[string]*serverEntryPoint, - backendsHandlers map[string]http.Handler, backendsHealthCheck map[string]*healthcheck.BackendConfig, -) ([]handlerPostConfig, error) { +func (s *Server) applyConfiguration(ctx context.Context, configuration config.Configuration) map[string]http.Handler { + staticConfiguration := static.ConvertStaticConf(s.globalConfiguration) - frontend := config.Frontends[frontendName] - hostResolver := buildHostResolver(s.globalConfiguration) - - if len(frontend.EntryPoints) == 0 { - return nil, fmt.Errorf("no entrypoint defined for frontend %s", frontendName) + var entryPoints []string + for entryPointName := range s.entryPoints { + entryPoints = append(entryPoints, entryPointName) } - backend := config.Backends[frontend.Backend] - if backend == nil { - return nil, fmt.Errorf("undefined backend '%s' for frontend %s", frontend.Backend, frontendName) - } + serviceManager := service.NewManager(configuration.Services, s.defaultRoundTripper) + middlewaresBuilder := middleware.NewBuilder(configuration.Middlewares, serviceManager) + responseModifierFactory := responsemodifiers.NewBuilder(configuration.Middlewares) - frontendHash, err := frontend.Hash() - if err != nil { - return nil, fmt.Errorf("error calculating hash value for frontend %s: %v", frontendName, err) - } + routerManager := router.NewManager(configuration.Routers, serviceManager, middlewaresBuilder, responseModifierFactory) - var postConfigs []handlerPostConfig + handlers := routerManager.BuildHandlers(ctx, entryPoints, staticConfiguration.EntryPoints.Defaults) - for _, entryPointName := range frontend.EntryPoints { - log.Debugf("Wiring frontend %s to entryPoint %s", frontendName, entryPointName) + routerHandlers := make(map[string]http.Handler) - entryPoint := s.entryPoints[entryPointName].Configuration + for _, entryPointName := range entryPoints { + internalMuxRouter := mux.NewRouter(). 
+ SkipClean(true) - if backendsHandlers[entryPointName+providerName+frontendHash] == nil { - log.Debugf("Creating backend %s", frontend.Backend) + ctx = log.With(ctx, log.Str(log.EntryPointName, entryPointName)) - handlers, responseModifier, postConfig, err := s.buildMiddlewares(frontendName, frontend, config.Backends, entryPointName, providerName) - if err != nil { - return nil, err - } + factory := s.entryPoints[entryPointName].RouteAppenderFactory + if factory != nil { + // FIXME remove currentConfigurations + appender := factory.NewAppender(ctx, middlewaresBuilder, &s.currentConfigurations) + appender.Append(internalMuxRouter) + } - if postConfig != nil { - postConfigs = append(postConfigs, postConfig) - } - - fwd, err := s.buildForwarder(entryPointName, entryPoint, frontendName, frontend, responseModifier, backend) - if err != nil { - return nil, fmt.Errorf("failed to create the forwarder for frontend %s: %v", frontendName, err) - } - - lb, healthCheckConfig, err := s.buildBalancerMiddlewares(frontendName, frontend, backend, fwd) - if err != nil { - return nil, err - } - - // Handler used by error pages - if backendsHandlers[entryPointName+providerName+frontend.Backend] == nil { - backendsHandlers[entryPointName+providerName+frontend.Backend] = lb - } - - if healthCheckConfig != nil { - backendsHealthCheck[entryPointName+providerName+frontendHash] = healthCheckConfig - } - - n := negroni.New() - - for _, handler := range handlers { - n.Use(handler) - } - - n.UseHandler(lb) - - backendsHandlers[entryPointName+providerName+frontendHash] = n + if h, ok := handlers[entryPointName]; ok { + internalMuxRouter.NotFoundHandler = h } else { - log.Debugf("Reusing backend %s [%s - %s - %s - %s]", - frontend.Backend, entryPointName, providerName, frontendName, frontendHash) + internalMuxRouter.NotFoundHandler = s.buildDefaultHTTPRouter() } - serverRoute, err := buildServerRoute(serverEntryPoints[entryPointName], frontendName, frontend, hostResolver) + routerHandlers[entryPointName] = internalMuxRouter + + chain := alice.New() + + if s.accessLoggerMiddleware != nil { + chain = chain.Append(accesslog.WrapHandler(s.accessLoggerMiddleware)) + } + + if s.tracer != nil { + chain = chain.Append(tracing.WrapEntryPointHandler(ctx, s.tracer, entryPointName)) + } + + chain = chain.Append(requestdecorator.WrapHandler(s.requestDecorator)) + + handler, err := chain.Then(internalMuxRouter.NotFoundHandler) if err != nil { - return nil, err - } - - handler := buildMatcherMiddlewares(serverRoute, backendsHandlers[entryPointName+providerName+frontendHash]) - serverRoute.Route.Handler(handler) - - err = serverRoute.Route.GetError() - if err != nil { - // FIXME error management - log.Errorf("Error building route: %s", err) + log.FromContext(ctx).Error(err) + continue } + internalMuxRouter.NotFoundHandler = handler } - return postConfigs, nil + return routerHandlers } -func (s *Server) buildForwarder(entryPointName string, entryPoint *configuration.EntryPoint, - frontendName string, frontend *types.Frontend, - responseModifier modifyResponse, backend *types.Backend) (http.Handler, error) { - - roundTripper, err := s.getRoundTripper(entryPointName, frontend.PassTLSCert, entryPoint.TLS) - if err != nil { - return nil, fmt.Errorf("failed to create RoundTripper for frontend %s: %v", frontendName, err) - } - - var flushInterval parse.Duration - if backend.ResponseForwarding != nil { - err := flushInterval.Set(backend.ResponseForwarding.FlushInterval) - if err != nil { - return nil, fmt.Errorf("error creating flush interval 
for frontend %s: %v", frontendName, err) - } - } - - var fwd http.Handler - fwd, err = forward.New( - forward.Stream(true), - forward.PassHostHeader(frontend.PassHostHeader), - forward.RoundTripper(roundTripper), - forward.ResponseModifier(responseModifier), - forward.BufferPool(s.bufferPool), - forward.StreamingFlushInterval(time.Duration(flushInterval)), - forward.WebsocketConnectionClosedHook(func(req *http.Request, conn net.Conn) { - server := req.Context().Value(http.ServerContextKey).(*http.Server) - if server != nil { - connState := server.ConnState - if connState != nil { - connState(conn, http.StateClosed) - } - } - }), - ) - if err != nil { - return nil, fmt.Errorf("error creating forwarder for frontend %s: %v", frontendName, err) - } - - if s.tracingMiddleware.IsEnabled() { - tm := s.tracingMiddleware.NewForwarderMiddleware(frontendName, frontend.Backend) - - next := fwd - fwd = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - tm.ServeHTTP(w, r, next.ServeHTTP) - }) - } - - fwd = pipelining.NewPipelining(fwd) - - return fwd, nil -} - -func buildServerRoute(serverEntryPoint *serverEntryPoint, frontendName string, frontend *types.Frontend, hostResolver *hostresolver.Resolver) (*types.ServerRoute, error) { - serverRoute := &types.ServerRoute{Route: serverEntryPoint.httpRouter.GetHandler().NewRoute().Name(frontendName)} - - priority := 0 - for routeName, route := range frontend.Routes { - rls := rules.Rules{Route: serverRoute, HostResolver: hostResolver} - newRoute, err := rls.Parse(route.Rule) - if err != nil { - return nil, fmt.Errorf("error creating route for frontend %s: %v", frontendName, err) - } - - serverRoute.Route = newRoute - - priority += len(route.Rule) - log.Debugf("Creating route %s %s", routeName, route.Rule) - } - - if frontend.Priority > 0 { - serverRoute.Route.Priority(frontend.Priority) - } else { - serverRoute.Route.Priority(priority) - } - - return serverRoute, nil -} - -func (s *Server) preLoadConfiguration(configMsg types.ConfigMessage) { +func (s *Server) preLoadConfiguration(configMsg config.Message) { providersThrottleDuration := time.Duration(s.globalConfiguration.ProvidersThrottleDuration) s.defaultConfigurationValues(configMsg.Configuration) - currentConfigurations := s.currentConfigurations.Get().(types.Configurations) + currentConfigurations := s.currentConfigurations.Get().(config.Configurations) + logger := log.WithoutContext().WithField(log.ProviderName, configMsg.ProviderName) if log.GetLevel() == logrus.DebugLevel { jsonConf, _ := json.Marshal(configMsg.Configuration) - log.Debugf("Configuration received from provider %s: %s", configMsg.ProviderName, string(jsonConf)) + logger.Debugf("Configuration received from provider %s: %s", configMsg.ProviderName, string(jsonConf)) } - if configMsg.Configuration == nil || configMsg.Configuration.Backends == nil && configMsg.Configuration.Frontends == nil && configMsg.Configuration.TLS == nil { - log.Infof("Skipping empty Configuration for provider %s", configMsg.ProviderName) + if configMsg.Configuration == nil || configMsg.Configuration.Routers == nil && configMsg.Configuration.Services == nil && configMsg.Configuration.Middlewares == nil && configMsg.Configuration.TLS == nil { + logger.Infof("Skipping empty Configuration for provider %s", configMsg.ProviderName) return } if reflect.DeepEqual(currentConfigurations[configMsg.ProviderName], configMsg.Configuration) { - log.Infof("Skipping same configuration for provider %s", configMsg.ProviderName) + logger.Infof("Skipping same configuration 
for provider %s", configMsg.ProviderName) return } providerConfigUpdateCh, ok := s.providerConfigUpdateMap[configMsg.ProviderName] if !ok { - providerConfigUpdateCh = make(chan types.ConfigMessage) + providerConfigUpdateCh = make(chan config.Message) s.providerConfigUpdateMap[configMsg.ProviderName] = providerConfigUpdateCh s.routinesPool.Go(func(stop chan bool) { s.throttleProviderConfigReload(providersThrottleDuration, s.configurationValidatedChan, providerConfigUpdateCh, stop) @@ -326,74 +206,8 @@ func (s *Server) preLoadConfiguration(configMsg types.ConfigMessage) { providerConfigUpdateCh <- configMsg } -func (s *Server) defaultConfigurationValues(configuration *types.Configuration) { - if configuration == nil || configuration.Frontends == nil { - return - } - s.configureFrontends(configuration.Frontends) - configureBackends(configuration.Backends) -} - -func (s *Server) configureFrontends(frontends map[string]*types.Frontend) { - defaultEntrypoints := s.globalConfiguration.DefaultEntryPoints - - for frontendName, frontend := range frontends { - // default endpoints if not defined in frontends - if len(frontend.EntryPoints) == 0 { - frontend.EntryPoints = defaultEntrypoints - } - - frontendEntryPoints, undefinedEntryPoints := s.filterEntryPoints(frontend.EntryPoints) - if len(undefinedEntryPoints) > 0 { - log.Errorf("Undefined entry point(s) '%s' for frontend %s", strings.Join(undefinedEntryPoints, ","), frontendName) - } - - frontend.EntryPoints = frontendEntryPoints - } -} - -func (s *Server) filterEntryPoints(entryPoints []string) ([]string, []string) { - var frontendEntryPoints []string - var undefinedEntryPoints []string - - for _, fepName := range entryPoints { - var exist bool - - for epName := range s.entryPoints { - if epName == fepName { - exist = true - break - } - } - - if exist { - frontendEntryPoints = append(frontendEntryPoints, fepName) - } else { - undefinedEntryPoints = append(undefinedEntryPoints, fepName) - } - } - - return frontendEntryPoints, undefinedEntryPoints -} - -func configureBackends(backends map[string]*types.Backend) { - for backendName := range backends { - backend := backends[backendName] - - _, err := types.NewLoadBalancerMethod(backend.LoadBalancer) - if err != nil { - log.Debugf("Backend %s: %v", backendName, err) - - var stickiness *types.Stickiness - if backend.LoadBalancer != nil { - stickiness = backend.LoadBalancer.Stickiness - } - backend.LoadBalancer = &types.LoadBalancer{ - Method: "wrr", - Stickiness: stickiness, - } - } - } +func (s *Server) defaultConfigurationValues(configuration *config.Configuration) { + // FIXME create a config hook } func (s *Server) listenConfigurations(stop chan bool) { @@ -414,7 +228,7 @@ func (s *Server) listenConfigurations(stop chan bool) { // It will immediately publish a new configuration and then only publish the next configuration after the throttle duration. // Note that in the case it receives N new configs in the timeframe of the throttle duration after publishing, // it will publish the last of the newly received configurations. 
-func (s *Server) throttleProviderConfigReload(throttle time.Duration, publish chan<- types.ConfigMessage, in <-chan types.ConfigMessage, stop chan bool) { +func (s *Server) throttleProviderConfigReload(throttle time.Duration, publish chan<- config.Message, in <-chan config.Message, stop chan bool) { ring := channels.NewRingChannel(1) defer ring.Close() @@ -424,7 +238,7 @@ func (s *Server) throttleProviderConfigReload(throttle time.Duration, publish ch case <-stop: return case nextConfig := <-ring.Out(): - if config, ok := nextConfig.(types.ConfigMessage); ok { + if config, ok := nextConfig.(config.Message); ok { publish <- config time.Sleep(throttle) } @@ -442,95 +256,53 @@ func (s *Server) throttleProviderConfigReload(throttle time.Duration, publish ch } } -func buildMatcherMiddlewares(serverRoute *types.ServerRoute, handler http.Handler) http.Handler { - // path replace - This needs to always be the very last on the handler chain (first in the order in this function) - // -- Replacing Path should happen at the very end of the Modifier chain, after all the Matcher+Modifiers ran - if len(serverRoute.ReplacePath) > 0 { - handler = &middlewares.ReplacePath{ - Path: serverRoute.ReplacePath, - Handler: handler, - } - } - - if len(serverRoute.ReplacePathRegex) > 0 { - sp := strings.Split(serverRoute.ReplacePathRegex, " ") - if len(sp) == 2 { - handler = middlewares.NewReplacePathRegexHandler(sp[0], sp[1], handler) - } else { - log.Warnf("Invalid syntax for ReplacePathRegex: %s. Separate the regular expression and the replacement by a space.", serverRoute.ReplacePathRegex) - } - } - - // add prefix - This needs to always be right before ReplacePath on the chain (second in order in this function) - // -- Adding Path Prefix should happen after all *Strip Matcher+Modifiers ran, but before Replace (in case it's configured) - if len(serverRoute.AddPrefix) > 0 { - handler = &middlewares.AddPrefix{ - Prefix: serverRoute.AddPrefix, - Handler: handler, - } - } - - // strip prefix - if len(serverRoute.StripPrefixes) > 0 { - handler = &middlewares.StripPrefix{ - Prefixes: serverRoute.StripPrefixes, - Handler: handler, - } - } - - // strip prefix with regex - if len(serverRoute.StripPrefixesRegex) > 0 { - handler = middlewares.NewStripPrefixRegex(handler, serverRoute.StripPrefixesRegex) - } - - return handler -} - func (s *Server) postLoadConfiguration() { - if s.metricsRegistry.IsEnabled() { - activeConfig := s.currentConfigurations.Get().(types.Configurations) - metrics.OnConfigurationUpdate(activeConfig) - } + // FIXME metrics + // if s.metricsRegistry.IsEnabled() { + // activeConfig := s.currentConfigurations.Get().(config.Configurations) + // metrics.OnConfigurationUpdate(activeConfig) + // } if s.globalConfiguration.ACME == nil || s.leadership == nil || !s.leadership.IsLeader() { return } - if s.globalConfiguration.ACME.OnHostRule { - currentConfigurations := s.currentConfigurations.Get().(types.Configurations) - for _, config := range currentConfigurations { - for _, frontend := range config.Frontends { - - // check if one of the frontend entrypoints is configured with TLS - // and is configured with ACME - acmeEnabled := false - for _, entryPoint := range frontend.EntryPoints { - if s.globalConfiguration.ACME.EntryPoint == entryPoint && s.entryPoints[entryPoint].Configuration.TLS != nil { - acmeEnabled = true - break - } - } - - if acmeEnabled { - for _, route := range frontend.Routes { - rls := rules.Rules{} - domains, err := rls.ParseDomains(route.Rule) - if err != nil { - log.Errorf("Error 
parsing domains: %v", err) - } else if len(domains) == 0 { - log.Debugf("No domain parsed in rule %q", route.Rule) - } else { - s.globalConfiguration.ACME.LoadCertificateForDomains(domains) - } - } - } - } - } - } + // FIXME acme + // if s.globalConfiguration.ACME.OnHostRule { + // currentConfigurations := s.currentConfigurations.Get().(config.Configurations) + // for _, config := range currentConfigurations { + // for _, frontend := range config.Frontends { + // + // // check if one of the frontend entrypoints is configured with TLS + // // and is configured with ACME + // acmeEnabled := false + // for _, entryPoint := range frontend.EntryPoints { + // if s.globalConfiguration.ACME.EntryPoint == entryPoint && s.entryPoints[entryPoint].Configuration.TLS != nil { + // acmeEnabled = true + // break + // } + // } + // + // if acmeEnabled { + // for _, route := range frontend.Routes { + // rls := rules.Rules{} + // domains, err := rls.ParseDomains(route.Rule) + // if err != nil { + // log.Errorf("Error parsing domains: %v", err) + // } else if len(domains) == 0 { + // log.Debugf("No domain parsed in rule %q", route.Rule) + // } else { + // s.globalConfiguration.ACME.LoadCertificateForDomains(domains) + // } + // } + // } + // } + // } + // } } // loadHTTPSConfiguration add/delete HTTPS certificate managed dynamically -func (s *Server) loadHTTPSConfiguration(configurations types.Configurations, defaultEntryPoints configuration.DefaultEntryPoints) map[string]map[string]*tls.Certificate { +func (s *Server) loadHTTPSConfiguration(configurations config.Configurations, defaultEntryPoints configuration.DefaultEntryPoints) map[string]map[string]*tls.Certificate { newEPCertificates := make(map[string]map[string]*tls.Certificate) // Get all certificates for _, config := range configurations { @@ -543,9 +315,14 @@ func (s *Server) loadHTTPSConfiguration(configurations types.Configurations, def func (s *Server) buildServerEntryPoints() map[string]*serverEntryPoint { serverEntryPoints := make(map[string]*serverEntryPoint) + + ctx := context.Background() + + handlers := s.applyConfiguration(ctx, config.Configuration{}) + for entryPointName, entryPoint := range s.entryPoints { serverEntryPoints[entryPointName] = &serverEntryPoint{ - httpRouter: middlewares.NewHandlerSwitcher(s.buildDefaultHTTPRouter()), + httpRouter: middlewares.NewHandlerSwitcher(handlers[entryPointName]), onDemandListener: entryPoint.OnDemandListener, tlsALPNGetter: entryPoint.TLSALPNGetter, } @@ -557,19 +334,21 @@ func (s *Server) buildServerEntryPoints() map[string]*serverEntryPoint { } if entryPoint.Configuration.TLS != nil { + logger := log.FromContext(ctx).WithField(log.EntryPointName, entryPointName) + serverEntryPoints[entryPointName].certs.SniStrict = entryPoint.Configuration.TLS.SniStrict if entryPoint.Configuration.TLS.DefaultCertificate != nil { cert, err := buildDefaultCertificate(entryPoint.Configuration.TLS.DefaultCertificate) if err != nil { - log.Error(err) + logger.Error(err) continue } serverEntryPoints[entryPointName].certs.DefaultCertificate = cert } else { cert, err := generate.DefaultCertificate() if err != nil { - log.Errorf("failed to generate default certificate: %v", err) + logger.Error(err) continue } serverEntryPoints[entryPointName].certs.DefaultCertificate = cert @@ -585,6 +364,13 @@ func (s *Server) buildServerEntryPoints() map[string]*serverEntryPoint { return serverEntryPoints } +func (s *Server) buildDefaultHTTPRouter() *mux.Router { + rt := mux.NewRouter() + rt.NotFoundHandler = 
http.HandlerFunc(http.NotFound) + rt.SkipClean(true) + return rt +} + func buildDefaultCertificate(defaultCertificate *traefiktls.Certificate) (*tls.Certificate, error) { certFile, err := defaultCertificate.CertFile.Read() if err != nil { @@ -602,31 +388,3 @@ func buildDefaultCertificate(defaultCertificate *traefiktls.Certificate) (*tls.C } return &cert, nil } - -func (s *Server) buildDefaultHTTPRouter() *mux.Router { - rt := mux.NewRouter() - rt.NotFoundHandler = s.wrapHTTPHandlerWithAccessLog(http.HandlerFunc(http.NotFound), "backend not found") - rt.StrictSlash(!s.globalConfiguration.KeepTrailingSlash) - rt.SkipClean(true) - return rt -} - -func sortedFrontendNamesForConfig(configuration *types.Configuration) []string { - var keys []string - for key := range configuration.Frontends { - keys = append(keys, key) - } - sort.Strings(keys) - return keys -} - -func buildHostResolver(globalConfig configuration.GlobalConfiguration) *hostresolver.Resolver { - if globalConfig.HostResolver != nil { - return &hostresolver.Resolver{ - CnameFlattening: globalConfig.HostResolver.CnameFlattening, - ResolvConfig: globalConfig.HostResolver.ResolvConfig, - ResolvDepth: globalConfig.HostResolver.ResolvDepth, - } - } - return nil -} diff --git a/server/server_configuration_test.go b/server/server_configuration_test.go index b08b3779c..e44c3b337 100644 --- a/server/server_configuration_test.go +++ b/server/server_configuration_test.go @@ -1,25 +1,16 @@ package server import ( - "fmt" "net/http" "net/http/httptest" - "net/url" "testing" "time" - "github.com/containous/flaeg/parse" - "github.com/containous/mux" - "github.com/containous/traefik/configuration" - "github.com/containous/traefik/healthcheck" - "github.com/containous/traefik/middlewares" - "github.com/containous/traefik/rules" + "github.com/containous/traefik/config" + "github.com/containous/traefik/old/configuration" th "github.com/containous/traefik/testhelpers" "github.com/containous/traefik/tls" - "github.com/containous/traefik/types" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/vulcand/oxy/roundrobin" ) // LocalhostCert is a PEM-encoded TLS cert with SAN IPs @@ -60,137 +51,6 @@ f9Oeos0UUothgiDktdQHxdNEwLjQf7lJJBzV+5OtwswCWA== -----END RSA PRIVATE KEY-----`) ) -type testLoadBalancer struct{} - -func (lb *testLoadBalancer) ServeHTTP(w http.ResponseWriter, req *http.Request) { - // noop -} - -func (lb *testLoadBalancer) RemoveServer(u *url.URL) error { - return nil -} - -func (lb *testLoadBalancer) UpsertServer(u *url.URL, options ...roundrobin.ServerOption) error { - return nil -} - -func (lb *testLoadBalancer) Servers() []*url.URL { - return []*url.URL{} -} - -func TestServerLoadConfigHealthCheckOptions(t *testing.T) { - healthChecks := []*types.HealthCheck{ - nil, - { - Path: "/path", - }, - } - - for _, lbMethod := range []string{"Wrr", "Drr"} { - for _, healthCheck := range healthChecks { - t.Run(fmt.Sprintf("%s/hc=%t", lbMethod, healthCheck != nil), func(t *testing.T) { - globalConfig := configuration.GlobalConfiguration{ - HealthCheck: &configuration.HealthCheckConfig{ - Interval: parse.Duration(5 * time.Second), - Timeout: parse.Duration(3 * time.Second), - }, - } - entryPoints := map[string]EntryPoint{ - "http": { - Configuration: &configuration.EntryPoint{ - ForwardedHeaders: &configuration.ForwardedHeaders{Insecure: true}, - }, - }, - } - - dynamicConfigs := types.Configurations{ - "config": &types.Configuration{ - Frontends: map[string]*types.Frontend{ - "frontend": { - EntryPoints: 
[]string{"http"}, - Backend: "backend", - }, - }, - Backends: map[string]*types.Backend{ - "backend": { - Servers: map[string]types.Server{ - "server": { - URL: "http://localhost", - }, - }, - LoadBalancer: &types.LoadBalancer{ - Method: lbMethod, - }, - HealthCheck: healthCheck, - }, - }, - TLS: []*tls.Configuration{ - { - Certificate: &tls.Certificate{ - CertFile: localhostCert, - KeyFile: localhostKey, - }, - EntryPoints: []string{"http"}, - }, - }, - }, - } - - srv := NewServer(globalConfig, nil, entryPoints) - - _ = srv.loadConfig(dynamicConfigs, globalConfig) - - expectedNumHealthCheckBackends := 0 - if healthCheck != nil { - expectedNumHealthCheckBackends = 1 - } - assert.Len(t, healthcheck.GetHealthCheck(th.NewCollectingHealthCheckMetrics()).Backends, expectedNumHealthCheckBackends, "health check backends") - }) - } - } -} - -func TestServerLoadConfigEmptyBasicAuth(t *testing.T) { - globalConfig := configuration.GlobalConfiguration{ - EntryPoints: configuration.EntryPoints{ - "http": &configuration.EntryPoint{ForwardedHeaders: &configuration.ForwardedHeaders{Insecure: true}}, - }, - } - - dynamicConfigs := types.Configurations{ - "config": &types.Configuration{ - Frontends: map[string]*types.Frontend{ - "frontend": { - EntryPoints: []string{"http"}, - Backend: "backend", - }, - }, - Backends: map[string]*types.Backend{ - "backend": { - Servers: map[string]types.Server{ - "server": { - URL: "http://localhost", - }, - }, - LoadBalancer: &types.LoadBalancer{ - Method: "Wrr", - }, - }, - }, - }, - } - - entryPoints := map[string]EntryPoint{} - for key, value := range globalConfig.EntryPoints { - entryPoints[key] = EntryPoint{ - Configuration: value, - } - } - - srv := NewServer(globalConfig, nil, entryPoints) - _ = srv.loadConfig(dynamicConfigs, globalConfig) -} - func TestServerLoadCertificateWithDefaultEntryPoint(t *testing.T) { globalConfig := configuration.GlobalConfiguration{ DefaultEntryPoints: []string{"http", "https"}, @@ -200,8 +60,8 @@ func TestServerLoadCertificateWithDefaultEntryPoint(t *testing.T) { "http": {Configuration: &configuration.EntryPoint{}}, } - dynamicConfigs := types.Configurations{ - "config": &types.Configuration{ + dynamicConfigs := config.Configurations{ + "config": &config.Configuration{ TLS: []*tls.Configuration{ { Certificate: &tls.Certificate{ @@ -214,61 +74,58 @@ func TestServerLoadCertificateWithDefaultEntryPoint(t *testing.T) { } srv := NewServer(globalConfig, nil, entryPoints) - - mapEntryPoints := srv.loadConfig(dynamicConfigs, globalConfig) - if !mapEntryPoints["https"].certs.ContainsCertificates() { + _, mapsCerts := srv.loadConfig(dynamicConfigs, globalConfig) + if len(mapsCerts["https"]) == 0 { t.Fatal("got error: https entryPoint must have TLS certificates.") } } -func TestReuseBackend(t *testing.T) { +func TestReuseService(t *testing.T) { testServer := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { rw.WriteHeader(http.StatusOK) })) defer testServer.Close() - globalConfig := configuration.GlobalConfiguration{ - DefaultEntryPoints: []string{"http"}, - } - entryPoints := map[string]EntryPoint{ "http": {Configuration: &configuration.EntryPoint{ ForwardedHeaders: &configuration.ForwardedHeaders{Insecure: true}, }}, } - dynamicConfigs := types.Configurations{ + globalConfig := configuration.GlobalConfiguration{ + DefaultEntryPoints: []string{"http"}, + } + + dynamicConfigs := config.Configurations{ "config": th.BuildConfiguration( - th.WithFrontends( - th.WithFrontend("backend", - 
th.WithFrontendName("frontend0"), + th.WithRouters( + th.WithRouter("foo", + th.WithServiceName("bar"), + th.WithRule("Path:/ok")), + th.WithRouter("foo2", th.WithEntryPoints("http"), - th.WithRoutes(th.WithRoute("/ok", "Path: /ok"))), - th.WithFrontend("backend", - th.WithFrontendName("frontend1"), - th.WithEntryPoints("http"), - th.WithRoutes(th.WithRoute("/unauthorized", "Path: /unauthorized")), - th.WithFrontEndAuth(&types.Auth{ - Basic: &types.Basic{ - Users: []string{"foo:bar"}, - }, - })), + th.WithRule("Path:/unauthorized"), + th.WithServiceName("bar"), + th.WithRouterMiddlewares("basicauth")), ), - th.WithBackends(th.WithBackendNew("backend", + th.WithMiddlewares(th.WithMiddleware("basicauth", + th.WithBasicAuth(&config.BasicAuth{Users: []string{"foo:bar"}}), + )), + th.WithLoadBalancerServices(th.WithService("bar", th.WithLBMethod("wrr"), - th.WithServersNew(th.WithServerNew(testServer.URL))), + th.WithServers(th.WithServer(testServer.URL))), ), ), } srv := NewServer(globalConfig, nil, entryPoints) - serverEntryPoints := srv.loadConfig(dynamicConfigs, globalConfig) + serverEntryPoints, _ := srv.loadConfig(dynamicConfigs, globalConfig) // Test that the /ok path returns a status 200. responseRecorderOk := &httptest.ResponseRecorder{} requestOk := httptest.NewRequest(http.MethodGet, testServer.URL+"/ok", nil) - serverEntryPoints["http"].httpRouter.ServeHTTP(responseRecorderOk, requestOk) + serverEntryPoints["http"].ServeHTTP(responseRecorderOk, requestOk) assert.Equal(t, http.StatusOK, responseRecorderOk.Result().StatusCode, "status code") @@ -276,15 +133,15 @@ func TestReuseBackend(t *testing.T) { // the basic authentication defined on the frontend. responseRecorderUnauthorized := &httptest.ResponseRecorder{} requestUnauthorized := httptest.NewRequest(http.MethodGet, testServer.URL+"/unauthorized", nil) - serverEntryPoints["http"].httpRouter.ServeHTTP(responseRecorderUnauthorized, requestUnauthorized) + serverEntryPoints["http"].ServeHTTP(responseRecorderUnauthorized, requestUnauthorized) assert.Equal(t, http.StatusUnauthorized, responseRecorderUnauthorized.Result().StatusCode, "status code") } func TestThrottleProviderConfigReload(t *testing.T) { throttleDuration := 30 * time.Millisecond - publishConfig := make(chan types.ConfigMessage) - providerConfig := make(chan types.ConfigMessage) + publishConfig := make(chan config.Message) + providerConfig := make(chan config.Message) stop := make(chan bool) defer func() { stop <- true @@ -312,7 +169,7 @@ func TestThrottleProviderConfigReload(t *testing.T) { // publish 5 new configs, one new config each 10 milliseconds for i := 0; i < 5; i++ { - providerConfig <- types.ConfigMessage{} + providerConfig <- config.Message{} time.Sleep(10 * time.Millisecond) } @@ -335,205 +192,3 @@ func TestThrottleProviderConfigReload(t *testing.T) { t.Error("Last config was not published in time") } } - -func TestServerMultipleFrontendRules(t *testing.T) { - testCases := []struct { - expression string - requestURL string - expectedURL string - }{ - { - expression: "Host:foo.bar", - requestURL: "http://foo.bar", - expectedURL: "http://foo.bar", - }, - { - expression: "PathPrefix:/management;ReplacePath:/health", - requestURL: "http://foo.bar/management", - expectedURL: "http://foo.bar/health", - }, - { - expression: "Host:foo.bar;AddPrefix:/blah", - requestURL: "http://foo.bar/baz", - expectedURL: "http://foo.bar/blah/baz", - }, - { - expression: "PathPrefixStripRegex:/one/{two}/{three:[0-9]+}", - requestURL: "http://foo.bar/one/some/12345/four", - 
expectedURL: "http://foo.bar/four", - }, - { - expression: "PathPrefixStripRegex:/one/{two}/{three:[0-9]+};AddPrefix:/zero", - requestURL: "http://foo.bar/one/some/12345/four", - expectedURL: "http://foo.bar/zero/four", - }, - { - expression: "AddPrefix:/blah;ReplacePath:/baz", - requestURL: "http://foo.bar/hello", - expectedURL: "http://foo.bar/baz", - }, - { - expression: "PathPrefixStrip:/management;ReplacePath:/health", - requestURL: "http://foo.bar/management", - expectedURL: "http://foo.bar/health", - }, - } - - for _, test := range testCases { - test := test - t.Run(test.expression, func(t *testing.T) { - t.Parallel() - - router := mux.NewRouter() - route := router.NewRoute() - serverRoute := &types.ServerRoute{Route: route} - reqHostMid := &middlewares.RequestHost{} - rls := &rules.Rules{Route: serverRoute} - - expression := test.expression - routeResult, err := rls.Parse(expression) - - if err != nil { - t.Fatalf("Error while building route for %s: %+v", expression, err) - } - - request := th.MustNewRequest(http.MethodGet, test.requestURL, nil) - var routeMatch bool - reqHostMid.ServeHTTP(nil, request, func(w http.ResponseWriter, r *http.Request) { - routeMatch = routeResult.Match(r, &mux.RouteMatch{Route: routeResult}) - }) - - if !routeMatch { - t.Fatalf("Rule %s doesn't match", expression) - } - - handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - require.Equal(t, test.expectedURL, r.URL.String(), "URL") - }) - - hd := buildMatcherMiddlewares(serverRoute, handler) - serverRoute.Route.Handler(hd) - - serverRoute.Route.GetHandler().ServeHTTP(nil, request) - }) - } -} - -func TestServerBuildHealthCheckOptions(t *testing.T) { - lb := &testLoadBalancer{} - globalInterval := 15 * time.Second - globalTimeout := 3 * time.Second - - testCases := []struct { - desc string - hc *types.HealthCheck - expectedOpts *healthcheck.Options - }{ - { - desc: "nil health check", - hc: nil, - expectedOpts: nil, - }, - { - desc: "empty path", - hc: &types.HealthCheck{ - Path: "", - }, - expectedOpts: nil, - }, - { - desc: "unparseable interval", - hc: &types.HealthCheck{ - Path: "/path", - Interval: "unparseable", - }, - expectedOpts: &healthcheck.Options{ - Path: "/path", - Interval: globalInterval, - LB: lb, - Timeout: 3 * time.Second, - }, - }, - { - desc: "sub-zero interval", - hc: &types.HealthCheck{ - Path: "/path", - Interval: "-42s", - }, - expectedOpts: &healthcheck.Options{ - Path: "/path", - Interval: globalInterval, - LB: lb, - Timeout: 3 * time.Second, - }, - }, - { - desc: "parseable interval", - hc: &types.HealthCheck{ - Path: "/path", - Interval: "5m", - }, - expectedOpts: &healthcheck.Options{ - Path: "/path", - Interval: 5 * time.Minute, - LB: lb, - Timeout: 3 * time.Second, - }, - }, - { - desc: "unparseable timeout", - hc: &types.HealthCheck{ - Path: "/path", - Interval: "15s", - Timeout: "unparseable", - }, - expectedOpts: &healthcheck.Options{ - Path: "/path", - Interval: globalInterval, - Timeout: globalTimeout, - LB: lb, - }, - }, - { - desc: "sub-zero timeout", - hc: &types.HealthCheck{ - Path: "/path", - Interval: "15s", - Timeout: "-42s", - }, - expectedOpts: &healthcheck.Options{ - Path: "/path", - Interval: globalInterval, - Timeout: globalTimeout, - LB: lb, - }, - }, - { - desc: "parseable timeout", - hc: &types.HealthCheck{ - Path: "/path", - Interval: "15s", - Timeout: "10s", - }, - expectedOpts: &healthcheck.Options{ - Path: "/path", - Interval: globalInterval, - Timeout: 10 * time.Second, - LB: lb, - }, - }, - } - - for _, test := range 
testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - opts := buildHealthCheckOptions(lb, "backend", test.hc, &configuration.HealthCheckConfig{ - Interval: parse.Duration(globalInterval), - Timeout: parse.Duration(globalTimeout), - }) - assert.Equal(t, test.expectedOpts, opts, "health check options") - }) - } -} diff --git a/server/server_loadbalancer.go b/server/server_loadbalancer.go deleted file mode 100644 index 393ab6ebc..000000000 --- a/server/server_loadbalancer.go +++ /dev/null @@ -1,439 +0,0 @@ -package server - -import ( - "crypto/tls" - "crypto/x509" - "errors" - "fmt" - "net" - "net/http" - "net/url" - "time" - - "github.com/containous/traefik/configuration" - "github.com/containous/traefik/healthcheck" - "github.com/containous/traefik/log" - "github.com/containous/traefik/middlewares" - "github.com/containous/traefik/middlewares/accesslog" - "github.com/containous/traefik/server/cookie" - traefiktls "github.com/containous/traefik/tls" - "github.com/containous/traefik/types" - "github.com/vulcand/oxy/buffer" - "github.com/vulcand/oxy/connlimit" - "github.com/vulcand/oxy/ratelimit" - "github.com/vulcand/oxy/roundrobin" - "github.com/vulcand/oxy/utils" - "golang.org/x/net/http2" -) - -type h2cTransportWrapper struct { - *http2.Transport -} - -func (t *h2cTransportWrapper) RoundTrip(req *http.Request) (*http.Response, error) { - req.URL.Scheme = "http" - return t.Transport.RoundTrip(req) -} - -func (s *Server) buildBalancerMiddlewares(frontendName string, frontend *types.Frontend, backend *types.Backend, fwd http.Handler) (http.Handler, *healthcheck.BackendConfig, error) { - balancer, err := s.buildLoadBalancer(frontendName, frontend.Backend, backend, fwd) - if err != nil { - return nil, nil, err - } - - // Health Check - var backendHealthCheck *healthcheck.BackendConfig - if hcOpts := buildHealthCheckOptions(balancer, frontend.Backend, backend.HealthCheck, s.globalConfiguration.HealthCheck); hcOpts != nil { - log.Debugf("Setting up backend health check %s", *hcOpts) - - hcOpts.Transport = s.defaultForwardingRoundTripper - backendHealthCheck = healthcheck.NewBackendConfig(*hcOpts, frontend.Backend) - } - - // Empty (backend with no servers) - var lb http.Handler = middlewares.NewEmptyBackendHandler(balancer) - - // Rate Limit - if frontend.RateLimit != nil && len(frontend.RateLimit.RateSet) > 0 { - handler, err := buildRateLimiter(lb, frontend.RateLimit) - if err != nil { - return nil, nil, fmt.Errorf("error creating rate limiter: %v", err) - } - - lb = s.wrapHTTPHandlerWithAccessLog( - s.tracingMiddleware.NewHTTPHandlerWrapper("Rate limit", handler, false), - fmt.Sprintf("rate limit for %s", frontendName), - ) - } - - // Max Connections - if backend.MaxConn != nil && backend.MaxConn.Amount != 0 { - log.Debugf("Creating load-balancer connection limit") - - handler, err := buildMaxConn(lb, backend.MaxConn) - if err != nil { - return nil, nil, err - } - lb = s.wrapHTTPHandlerWithAccessLog(handler, fmt.Sprintf("connection limit for %s", frontendName)) - } - - // Retry - if s.globalConfiguration.Retry != nil { - handler := s.buildRetryMiddleware(lb, s.globalConfiguration.Retry, len(backend.Servers), frontend.Backend) - lb = s.tracingMiddleware.NewHTTPHandlerWrapper("Retry", handler, false) - } - - // Buffering - if backend.Buffering != nil { - handler, err := buildBufferingMiddleware(lb, backend.Buffering) - if err != nil { - return nil, nil, fmt.Errorf("error setting up buffering middleware: %s", err) - } - - // TODO refactor ? 
- lb = handler - } - - // Circuit Breaker - if backend.CircuitBreaker != nil { - log.Debugf("Creating circuit breaker %s", backend.CircuitBreaker.Expression) - - expression := backend.CircuitBreaker.Expression - circuitBreaker, err := middlewares.NewCircuitBreaker(lb, expression, middlewares.NewCircuitBreakerOptions(expression)) - if err != nil { - return nil, nil, fmt.Errorf("error creating circuit breaker: %v", err) - } - - lb = s.tracingMiddleware.NewHTTPHandlerWrapper("Circuit breaker", circuitBreaker, false) - } - - return lb, backendHealthCheck, nil -} - -func (s *Server) buildLoadBalancer(frontendName string, backendName string, backend *types.Backend, fwd http.Handler) (healthcheck.BalancerHandler, error) { - var rr *roundrobin.RoundRobin - var saveFrontend http.Handler - - if s.accessLoggerMiddleware != nil { - saveUsername := accesslog.NewSaveUsername(fwd) - saveBackend := accesslog.NewSaveBackend(saveUsername, backendName) - saveFrontend = accesslog.NewSaveFrontend(saveBackend, frontendName) - rr, _ = roundrobin.New(saveFrontend) - } else { - rr, _ = roundrobin.New(fwd) - } - - var stickySession *roundrobin.StickySession - var cookieName string - if stickiness := backend.LoadBalancer.Stickiness; stickiness != nil { - cookieName = cookie.GetName(stickiness.CookieName, backendName) - stickySession = roundrobin.NewStickySession(cookieName) - } - - lbMethod, err := types.NewLoadBalancerMethod(backend.LoadBalancer) - if err != nil { - return nil, fmt.Errorf("error loading load balancer method '%+v' for frontend %s: %v", backend.LoadBalancer, frontendName, err) - } - - var lb healthcheck.BalancerHandler - - switch lbMethod { - case types.Drr: - log.Debug("Creating load-balancer drr") - - if stickySession != nil { - log.Debugf("Sticky session with cookie %v", cookieName) - - lb, err = roundrobin.NewRebalancer(rr, roundrobin.RebalancerStickySession(stickySession)) - if err != nil { - return nil, err - } - } else { - lb, err = roundrobin.NewRebalancer(rr) - if err != nil { - return nil, err - } - } - case types.Wrr: - log.Debug("Creating load-balancer wrr") - - if stickySession != nil { - log.Debugf("Sticky session with cookie %v", cookieName) - - if s.accessLoggerMiddleware != nil { - lb, err = roundrobin.New(saveFrontend, roundrobin.EnableStickySession(stickySession)) - if err != nil { - return nil, err - } - } else { - lb, err = roundrobin.New(fwd, roundrobin.EnableStickySession(stickySession)) - if err != nil { - return nil, err - } - } - } else { - lb = rr - } - default: - return nil, fmt.Errorf("invalid load-balancing method %q", lbMethod) - } - - if err := s.configureLBServers(lb, backend, backendName); err != nil { - return nil, fmt.Errorf("error configuring load balancer for frontend %s: %v", frontendName, err) - } - - return lb, nil -} - -func (s *Server) configureLBServers(lb healthcheck.BalancerHandler, backend *types.Backend, backendName string) error { - for name, srv := range backend.Servers { - u, err := url.Parse(srv.URL) - if err != nil { - return fmt.Errorf("error parsing server URL %s: %v", srv.URL, err) - } - - log.Debugf("Creating server %s at %s with weight %d", name, u, srv.Weight) - - if err := lb.UpsertServer(u, roundrobin.Weight(srv.Weight)); err != nil { - return fmt.Errorf("error adding server %s to load balancer: %v", srv.URL, err) - } - - s.metricsRegistry.BackendServerUpGauge().With("backend", backendName, "url", srv.URL).Set(1) - } - return nil -} - -// getRoundTripper will either use server.defaultForwardingRoundTripper or create a new one -// given a 
custom TLS configuration is passed and the passTLSCert option is set to true. -func (s *Server) getRoundTripper(entryPointName string, passTLSCert bool, tls *traefiktls.TLS) (http.RoundTripper, error) { - if passTLSCert { - tlsConfig, err := createClientTLSConfig(entryPointName, tls) - if err != nil { - return nil, fmt.Errorf("failed to create TLSClientConfig: %v", err) - } - - transport, err := createHTTPTransport(s.globalConfiguration) - if err != nil { - return nil, fmt.Errorf("failed to create HTTP transport: %v", err) - } - - transport.TLSClientConfig = tlsConfig - return transport, nil - } - - return s.defaultForwardingRoundTripper, nil -} - -// createHTTPTransport creates an http.Transport configured with the GlobalConfiguration settings. -// For the settings that can't be configured in Traefik it uses the default http.Transport settings. -// An exception to this is the MaxIdleConns setting as we only provide the option MaxIdleConnsPerHost -// in Traefik at this point in time. Setting this value to the default of 100 could lead to confusing -// behavior and backwards compatibility issues. -func createHTTPTransport(globalConfiguration configuration.GlobalConfiguration) (*http.Transport, error) { - dialer := &net.Dialer{ - Timeout: configuration.DefaultDialTimeout, - KeepAlive: 30 * time.Second, - DualStack: true, - } - - if globalConfiguration.ForwardingTimeouts != nil { - dialer.Timeout = time.Duration(globalConfiguration.ForwardingTimeouts.DialTimeout) - } - - transport := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: dialer.DialContext, - MaxIdleConnsPerHost: globalConfiguration.MaxIdleConnsPerHost, - IdleConnTimeout: 90 * time.Second, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - } - - transport.RegisterProtocol("h2c", &h2cTransportWrapper{ - Transport: &http2.Transport{ - DialTLS: func(netw, addr string, cfg *tls.Config) (net.Conn, error) { - return net.Dial(netw, addr) - }, - AllowHTTP: true, - }, - }) - - if globalConfiguration.ForwardingTimeouts != nil { - transport.ResponseHeaderTimeout = time.Duration(globalConfiguration.ForwardingTimeouts.ResponseHeaderTimeout) - } - - if globalConfiguration.InsecureSkipVerify { - transport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} - } - - if len(globalConfiguration.RootCAs) > 0 { - transport.TLSClientConfig = &tls.Config{ - RootCAs: createRootCACertPool(globalConfiguration.RootCAs), - } - } - - err := http2.ConfigureTransport(transport) - if err != nil { - return nil, err - } - - return transport, nil -} - -func createRootCACertPool(rootCAs traefiktls.FilesOrContents) *x509.CertPool { - roots := x509.NewCertPool() - - for _, cert := range rootCAs { - certContent, err := cert.Read() - if err != nil { - log.Error("Error while read RootCAs", err) - continue - } - roots.AppendCertsFromPEM(certContent) - } - - return roots -} - -func createClientTLSConfig(entryPointName string, tlsOption *traefiktls.TLS) (*tls.Config, error) { - if tlsOption == nil { - return nil, errors.New("no TLS provided") - } - - config, err := tlsOption.Certificates.CreateTLSConfig(entryPointName) - if err != nil { - return nil, err - } - - if len(tlsOption.ClientCA.Files) > 0 { - pool := x509.NewCertPool() - for _, caFile := range tlsOption.ClientCA.Files { - data, err := caFile.Read() - if err != nil { - return nil, err - } - - if !pool.AppendCertsFromPEM(data) { - return nil, fmt.Errorf("invalid certificate(s) in %s", caFile) - } - } - config.RootCAs = pool - } - - 
config.BuildNameToCertificate() - - return config, nil -} - -func (s *Server) buildRetryMiddleware(handler http.Handler, retry *configuration.Retry, countServers int, backendName string) http.Handler { - retryListeners := middlewares.RetryListeners{} - if s.metricsRegistry.IsEnabled() { - retryListeners = append(retryListeners, middlewares.NewMetricsRetryListener(s.metricsRegistry, backendName)) - } - if s.accessLoggerMiddleware != nil { - retryListeners = append(retryListeners, &accesslog.SaveRetries{}) - } - - retryAttempts := countServers - if retry.Attempts > 0 { - retryAttempts = retry.Attempts - } - - log.Debugf("Creating retries max attempts %d", retryAttempts) - - return middlewares.NewRetry(retryAttempts, handler, retryListeners) -} - -func buildRateLimiter(handler http.Handler, rlConfig *types.RateLimit) (http.Handler, error) { - extractFunc, err := utils.NewExtractor(rlConfig.ExtractorFunc) - if err != nil { - return nil, err - } - - log.Debugf("Creating load-balancer rate limiter") - - rateSet := ratelimit.NewRateSet() - for _, rate := range rlConfig.RateSet { - if err := rateSet.Add(time.Duration(rate.Period), rate.Average, rate.Burst); err != nil { - return nil, err - } - } - - return ratelimit.New(handler, extractFunc, rateSet) -} - -func buildBufferingMiddleware(handler http.Handler, config *types.Buffering) (http.Handler, error) { - log.Debugf("Setting up buffering: request limits: %d (mem), %d (max), response limits: %d (mem), %d (max) with retry: '%s'", - config.MemRequestBodyBytes, config.MaxRequestBodyBytes, config.MemResponseBodyBytes, - config.MaxResponseBodyBytes, config.RetryExpression) - - return buffer.New( - handler, - buffer.MemRequestBodyBytes(config.MemRequestBodyBytes), - buffer.MaxRequestBodyBytes(config.MaxRequestBodyBytes), - buffer.MemResponseBodyBytes(config.MemResponseBodyBytes), - buffer.MaxResponseBodyBytes(config.MaxResponseBodyBytes), - buffer.CondSetter(len(config.RetryExpression) > 0, buffer.Retry(config.RetryExpression)), - ) -} - -func buildMaxConn(lb http.Handler, maxConns *types.MaxConn) (http.Handler, error) { - extractFunc, err := utils.NewExtractor(maxConns.ExtractorFunc) - if err != nil { - return nil, fmt.Errorf("error creating connection limit: %v", err) - } - - log.Debugf("Creating load-balancer connection limit") - - handler, err := connlimit.New(lb, extractFunc, maxConns.Amount) - if err != nil { - return nil, fmt.Errorf("error creating connection limit: %v", err) - } - - return handler, nil -} - -func buildHealthCheckOptions(lb healthcheck.BalancerHandler, backend string, hc *types.HealthCheck, hcConfig *configuration.HealthCheckConfig) *healthcheck.Options { - if hc == nil || hc.Path == "" || hcConfig == nil { - return nil - } - - interval := time.Duration(hcConfig.Interval) - if hc.Interval != "" { - intervalOverride, err := time.ParseDuration(hc.Interval) - if err != nil { - log.Errorf("Illegal health check interval for backend '%s': %s", backend, err) - } else if intervalOverride <= 0 { - log.Errorf("Health check interval smaller than zero for backend '%s', backend", backend) - } else { - interval = intervalOverride - } - } - - timeout := time.Duration(hcConfig.Timeout) - if hc.Timeout != "" { - timeoutOverride, err := time.ParseDuration(hc.Timeout) - if err != nil { - log.Errorf("Illegal health check timeout for backend '%s': %s", backend, err) - } else if timeoutOverride <= 0 { - log.Errorf("Health check timeout smaller than zero for backend '%s', backend", backend) - } else { - timeout = timeoutOverride - } - } - - if timeout 
>= interval { - log.Warnf("Health check timeout for backend '%s' should be lower than the health check interval. Interval set to timeout + 1 second (%s).", backend) - } - - return &healthcheck.Options{ - Scheme: hc.Scheme, - Path: hc.Path, - Port: hc.Port, - Interval: interval, - Timeout: timeout, - LB: lb, - Hostname: hc.Hostname, - Headers: hc.Headers, - } -} diff --git a/server/server_loadbalancer_test.go b/server/server_loadbalancer_test.go deleted file mode 100644 index 9af963a5d..000000000 --- a/server/server_loadbalancer_test.go +++ /dev/null @@ -1,81 +0,0 @@ -package server - -import ( - "testing" - - "github.com/containous/traefik/types" - "github.com/stretchr/testify/assert" -) - -func TestConfigureBackends(t *testing.T) { - validMethod := "Drr" - defaultMethod := "wrr" - - testCases := []struct { - desc string - lb *types.LoadBalancer - expectedMethod string - expectedStickiness *types.Stickiness - }{ - { - desc: "valid load balancer method with sticky enabled", - lb: &types.LoadBalancer{ - Method: validMethod, - Stickiness: &types.Stickiness{}, - }, - expectedMethod: validMethod, - expectedStickiness: &types.Stickiness{}, - }, - { - desc: "valid load balancer method with sticky disabled", - lb: &types.LoadBalancer{ - Method: validMethod, - Stickiness: nil, - }, - expectedMethod: validMethod, - }, - { - desc: "invalid load balancer method with sticky enabled", - lb: &types.LoadBalancer{ - Method: "Invalid", - Stickiness: &types.Stickiness{}, - }, - expectedMethod: defaultMethod, - expectedStickiness: &types.Stickiness{}, - }, - { - desc: "invalid load balancer method with sticky disabled", - lb: &types.LoadBalancer{ - Method: "Invalid", - Stickiness: nil, - }, - expectedMethod: defaultMethod, - }, - { - desc: "missing load balancer", - lb: nil, - expectedMethod: defaultMethod, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - backend := &types.Backend{ - LoadBalancer: test.lb, - } - - configureBackends(map[string]*types.Backend{ - "backend": backend, - }) - - expected := types.LoadBalancer{ - Method: test.expectedMethod, - Stickiness: test.expectedStickiness, - } - - assert.Equal(t, expected, *backend.LoadBalancer) - }) - } -} diff --git a/server/server_middlewares.go b/server/server_middlewares.go deleted file mode 100644 index a25b9ef9c..000000000 --- a/server/server_middlewares.go +++ /dev/null @@ -1,350 +0,0 @@ -package server - -import ( - "fmt" - "net/http" - - "github.com/containous/traefik/log" - "github.com/containous/traefik/middlewares" - "github.com/containous/traefik/middlewares/accesslog" - mauth "github.com/containous/traefik/middlewares/auth" - "github.com/containous/traefik/middlewares/errorpages" - "github.com/containous/traefik/middlewares/forwardedheaders" - "github.com/containous/traefik/middlewares/redirect" - "github.com/containous/traefik/types" - thoas_stats "github.com/thoas/stats" - "github.com/unrolled/secure" - "github.com/urfave/negroni" -) - -type handlerPostConfig func(backendsHandlers map[string]http.Handler) error - -type modifyResponse func(*http.Response) error - -func (s *Server) buildMiddlewares(frontendName string, frontend *types.Frontend, - backends map[string]*types.Backend, entryPointName string, providerName string) ([]negroni.Handler, modifyResponse, handlerPostConfig, error) { - - var middle []negroni.Handler - var postConfig handlerPostConfig - - // Error pages - if len(frontend.Errors) > 0 { - handlers, err := buildErrorPagesMiddleware(frontendName, frontend, 
backends, entryPointName, providerName) - if err != nil { - return nil, nil, nil, err - } - - postConfig = errorPagesPostConfig(handlers) - - for _, handler := range handlers { - middle = append(middle, handler) - } - } - - // Metrics - if s.metricsRegistry.IsEnabled() { - handler := middlewares.NewBackendMetricsMiddleware(s.metricsRegistry, frontend.Backend) - middle = append(middle, handler) - } - - // Whitelist - ipWhitelistMiddleware, err := buildIPWhiteLister(frontend.WhiteList, s.entryPoints[entryPointName].Configuration.ClientIPStrategy) - if err != nil { - return nil, nil, nil, fmt.Errorf("error creating IP Whitelister: %s", err) - } - if ipWhitelistMiddleware != nil { - log.Debugf("Configured IP Whitelists: %v", frontend.WhiteList.SourceRange) - - handler := s.tracingMiddleware.NewNegroniHandlerWrapper( - "IP whitelist", - s.wrapNegroniHandlerWithAccessLog(ipWhitelistMiddleware, fmt.Sprintf("ipwhitelister for %s", frontendName)), - false) - middle = append(middle, handler) - } - - // Redirect - if frontend.Redirect != nil && entryPointName != frontend.Redirect.EntryPoint { - rewrite, err := s.buildRedirectHandler(entryPointName, frontend.Redirect) - if err != nil { - return nil, nil, nil, fmt.Errorf("error creating Frontend Redirect: %v", err) - } - - handler := s.wrapNegroniHandlerWithAccessLog(rewrite, fmt.Sprintf("frontend redirect for %s", frontendName)) - middle = append(middle, handler) - - log.Debugf("Frontend %s redirect created", frontendName) - } - - // Header - headerMiddleware := middlewares.NewHeaderFromStruct(frontend.Headers) - if headerMiddleware != nil { - log.Debugf("Adding header middleware for frontend %s", frontendName) - - handler := s.tracingMiddleware.NewNegroniHandlerWrapper("Header", headerMiddleware, false) - middle = append(middle, handler) - } - - // Secure - secureMiddleware := middlewares.NewSecure(frontend.Headers) - if secureMiddleware != nil { - log.Debugf("Adding secure middleware for frontend %s", frontendName) - - handler := negroni.HandlerFunc(secureMiddleware.HandlerFuncWithNextForRequestOnly) - middle = append(middle, handler) - } - - // Authentication - if frontend.Auth != nil { - authMiddleware, err := mauth.NewAuthenticator(frontend.Auth, s.tracingMiddleware) - if err != nil { - return nil, nil, nil, err - } - - handler := s.wrapNegroniHandlerWithAccessLog(authMiddleware, fmt.Sprintf("Auth for %s", frontendName)) - middle = append(middle, handler) - } - - // TLSClientHeaders - tlsClientHeadersMiddleware := middlewares.NewTLSClientHeaders(frontend) - if tlsClientHeadersMiddleware != nil { - log.Debugf("Adding TLSClientHeaders middleware for frontend %s", frontendName) - - handler := s.tracingMiddleware.NewNegroniHandlerWrapper("TLSClientHeaders", tlsClientHeadersMiddleware, false) - middle = append(middle, handler) - } - - return middle, buildModifyResponse(secureMiddleware, headerMiddleware), postConfig, nil -} - -func (s *Server) buildServerEntryPointMiddlewares(serverEntryPointName string) ([]negroni.Handler, error) { - serverMiddlewares := []negroni.Handler{middlewares.NegroniRecoverHandler()} - - if s.tracingMiddleware.IsEnabled() { - serverMiddlewares = append(serverMiddlewares, s.tracingMiddleware.NewEntryPoint(serverEntryPointName)) - } - - if s.accessLoggerMiddleware != nil { - serverMiddlewares = append(serverMiddlewares, s.accessLoggerMiddleware) - } - - if s.metricsRegistry.IsEnabled() { - serverMiddlewares = append(serverMiddlewares, middlewares.NewEntryPointMetricsMiddleware(s.metricsRegistry, serverEntryPointName)) - } - - 
if s.globalConfiguration.API != nil { - if s.globalConfiguration.API.Stats == nil { - s.globalConfiguration.API.Stats = thoas_stats.New() - } - serverMiddlewares = append(serverMiddlewares, s.globalConfiguration.API.Stats) - if s.globalConfiguration.API.Statistics != nil { - if s.globalConfiguration.API.StatsRecorder == nil { - s.globalConfiguration.API.StatsRecorder = middlewares.NewStatsRecorder(s.globalConfiguration.API.Statistics.RecentErrors) - } - serverMiddlewares = append(serverMiddlewares, s.globalConfiguration.API.StatsRecorder) - } - } - - if s.entryPoints[serverEntryPointName].Configuration.Redirect != nil { - redirectHandlers, err := s.buildEntryPointRedirect() - if err != nil { - return nil, fmt.Errorf("failed to create redirect middleware: %v", err) - } - serverMiddlewares = append(serverMiddlewares, redirectHandlers[serverEntryPointName]) - } - - if s.entryPoints[serverEntryPointName].Configuration.Auth != nil { - authMiddleware, err := mauth.NewAuthenticator(s.entryPoints[serverEntryPointName].Configuration.Auth, s.tracingMiddleware) - if err != nil { - return nil, fmt.Errorf("failed to create authentication middleware: %v", err) - } - serverMiddlewares = append(serverMiddlewares, s.wrapNegroniHandlerWithAccessLog(authMiddleware, fmt.Sprintf("Auth for entrypoint %s", serverEntryPointName))) - } - - if s.entryPoints[serverEntryPointName].Configuration.Compress != nil { - serverMiddlewares = append(serverMiddlewares, &middlewares.Compress{}) - } - - if s.entryPoints[serverEntryPointName].Configuration.ForwardedHeaders != nil { - xForwardedMiddleware, err := forwardedheaders.NewXforwarded( - s.entryPoints[serverEntryPointName].Configuration.ForwardedHeaders.Insecure, - s.entryPoints[serverEntryPointName].Configuration.ForwardedHeaders.TrustedIPs, - ) - if err != nil { - return nil, fmt.Errorf("failed to create xforwarded headers middleware: %v", err) - } - serverMiddlewares = append(serverMiddlewares, xForwardedMiddleware) - } - - ipWhitelistMiddleware, err := buildIPWhiteLister(s.entryPoints[serverEntryPointName].Configuration.WhiteList, s.entryPoints[serverEntryPointName].Configuration.ClientIPStrategy) - if err != nil { - return nil, fmt.Errorf("failed to create ip whitelist middleware: %v", err) - } - if ipWhitelistMiddleware != nil { - serverMiddlewares = append(serverMiddlewares, s.wrapNegroniHandlerWithAccessLog(ipWhitelistMiddleware, fmt.Sprintf("ipwhitelister for entrypoint %s", serverEntryPointName))) - } - - // RequestHost Cannonizer - serverMiddlewares = append(serverMiddlewares, &middlewares.RequestHost{}) - - return serverMiddlewares, nil -} - -func errorPagesPostConfig(epHandlers []*errorpages.Handler) handlerPostConfig { - return func(backendsHandlers map[string]http.Handler) error { - for _, errorPageHandler := range epHandlers { - if handler, ok := backendsHandlers[errorPageHandler.BackendName]; ok { - err := errorPageHandler.PostLoad(handler) - if err != nil { - return fmt.Errorf("failed to configure error pages for backend %s: %v", errorPageHandler.BackendName, err) - } - } else { - err := errorPageHandler.PostLoad(nil) - if err != nil { - return fmt.Errorf("failed to configure error pages for %s: %v", errorPageHandler.FallbackURL, err) - } - } - } - return nil - } -} - -func buildErrorPagesMiddleware(frontendName string, frontend *types.Frontend, backends map[string]*types.Backend, entryPointName string, providerName string) ([]*errorpages.Handler, error) { - var errorPageHandlers []*errorpages.Handler - - for errorPageName, errorPage := range 
frontend.Errors { - if frontend.Backend == errorPage.Backend { - log.Errorf("Error when creating error page %q for frontend %q: error pages backend %q is the same as backend for the frontend (infinite call risk).", - errorPageName, frontendName, errorPage.Backend) - } else if backends[errorPage.Backend] == nil { - log.Errorf("Error when creating error page %q for frontend %q: the backend %q doesn't exist.", - errorPageName, frontendName, errorPage.Backend) - } else { - errorPagesHandler, err := errorpages.NewHandler(errorPage, entryPointName+providerName+errorPage.Backend) - if err != nil { - return nil, fmt.Errorf("error creating error pages: %v", err) - } - - if errorPageServer, ok := backends[errorPage.Backend].Servers["error"]; ok { - errorPagesHandler.FallbackURL = errorPageServer.URL - } - - errorPageHandlers = append(errorPageHandlers, errorPagesHandler) - } - } - - return errorPageHandlers, nil -} - -func (s *Server) buildBasicAuthMiddleware(authData []string) (*mauth.Authenticator, error) { - users := types.Users{} - for _, user := range authData { - users = append(users, user) - } - - auth := &types.Auth{} - auth.Basic = &types.Basic{ - Users: users, - } - - authMiddleware, err := mauth.NewAuthenticator(auth, s.tracingMiddleware) - if err != nil { - return nil, fmt.Errorf("error creating Basic Auth: %v", err) - } - - return authMiddleware, nil -} - -func (s *Server) buildEntryPointRedirect() (map[string]negroni.Handler, error) { - redirectHandlers := map[string]negroni.Handler{} - - for entryPointName, ep := range s.entryPoints { - entryPoint := ep.Configuration - - if entryPoint.Redirect != nil && entryPointName != entryPoint.Redirect.EntryPoint { - handler, err := s.buildRedirectHandler(entryPointName, entryPoint.Redirect) - if err != nil { - return nil, fmt.Errorf("error loading configuration for entrypoint %s: %v", entryPointName, err) - } - - handlerToUse := s.wrapNegroniHandlerWithAccessLog(handler, fmt.Sprintf("entrypoint redirect for %s", entryPointName)) - redirectHandlers[entryPointName] = handlerToUse - } - } - - return redirectHandlers, nil -} - -func (s *Server) buildRedirectHandler(srcEntryPointName string, opt *types.Redirect) (negroni.Handler, error) { - // entry point redirect - if len(opt.EntryPoint) > 0 { - entryPoint := s.entryPoints[opt.EntryPoint].Configuration - if entryPoint == nil { - return nil, fmt.Errorf("unknown target entrypoint %q", srcEntryPointName) - } - log.Debugf("Creating entry point redirect %s -> %s", srcEntryPointName, opt.EntryPoint) - return redirect.NewEntryPointHandler(entryPoint, opt.Permanent) - } - - // regex redirect - redirection, err := redirect.NewRegexHandler(opt.Regex, opt.Replacement, opt.Permanent) - if err != nil { - return nil, err - } - log.Debugf("Creating regex redirect %s -> %s -> %s", srcEntryPointName, opt.Regex, opt.Replacement) - - return redirection, nil -} - -func buildIPWhiteLister(whiteList *types.WhiteList, ipStrategy *types.IPStrategy) (*middlewares.IPWhiteLister, error) { - if whiteList == nil { - return nil, nil - } - - if whiteList.IPStrategy != nil { - ipStrategy = whiteList.IPStrategy - } - - strategy, err := ipStrategy.Get() - if err != nil { - return nil, err - } - - return middlewares.NewIPWhiteLister(whiteList.SourceRange, strategy) -} - -func (s *Server) wrapNegroniHandlerWithAccessLog(handler negroni.Handler, frontendName string) negroni.Handler { - if s.accessLoggerMiddleware != nil { - saveUsername := accesslog.NewSaveNegroniUsername(handler) - saveBackend := 
accesslog.NewSaveNegroniBackend(saveUsername, "Traefik") - saveFrontend := accesslog.NewSaveNegroniFrontend(saveBackend, frontendName) - return saveFrontend - } - return handler -} - -func (s *Server) wrapHTTPHandlerWithAccessLog(handler http.Handler, frontendName string) http.Handler { - if s.accessLoggerMiddleware != nil { - saveUsername := accesslog.NewSaveUsername(handler) - saveBackend := accesslog.NewSaveBackend(saveUsername, "Traefik") - saveFrontend := accesslog.NewSaveFrontend(saveBackend, frontendName) - return saveFrontend - } - return handler -} - -func buildModifyResponse(secure *secure.Secure, header *middlewares.HeaderStruct) func(res *http.Response) error { - return func(res *http.Response) error { - if secure != nil { - if err := secure.ModifyResponseHeaders(res); err != nil { - return err - } - } - - if header != nil { - if err := header.ModifyResponseHeaders(res); err != nil { - return err - } - } - return nil - } -} diff --git a/server/server_middlewares_test.go b/server/server_middlewares_test.go deleted file mode 100644 index 8236d0e53..000000000 --- a/server/server_middlewares_test.go +++ /dev/null @@ -1,270 +0,0 @@ -package server - -import ( - "net/http" - "net/http/httptest" - "reflect" - "testing" - - "github.com/containous/mux" - "github.com/containous/traefik/configuration" - "github.com/containous/traefik/metrics" - "github.com/containous/traefik/middlewares" - th "github.com/containous/traefik/testhelpers" - "github.com/containous/traefik/tls" - "github.com/containous/traefik/types" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/urfave/negroni" -) - -func TestServerEntryPointWhitelistConfig(t *testing.T) { - testCases := []struct { - desc string - entrypoint *configuration.EntryPoint - expectMiddleware bool - }{ - { - desc: "no whitelist middleware if no config on entrypoint", - entrypoint: &configuration.EntryPoint{ - Address: ":0", - ForwardedHeaders: &configuration.ForwardedHeaders{Insecure: true}, - }, - expectMiddleware: false, - }, - { - desc: "whitelist middleware should be added if configured on entrypoint", - entrypoint: &configuration.EntryPoint{ - Address: ":0", - WhiteList: &types.WhiteList{ - SourceRange: []string{ - "127.0.0.1/32", - }, - }, - ForwardedHeaders: &configuration.ForwardedHeaders{Insecure: true}, - }, - expectMiddleware: true, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - srv := Server{ - globalConfiguration: configuration.GlobalConfiguration{}, - metricsRegistry: metrics.NewVoidRegistry(), - entryPoints: map[string]EntryPoint{ - "test": { - Configuration: test.entrypoint, - }, - }, - } - - srv.serverEntryPoints = srv.buildServerEntryPoints() - srvEntryPoint := srv.setupServerEntryPoint("test", srv.serverEntryPoints["test"]) - handler := srvEntryPoint.httpServer.Handler.(*mux.Router).NotFoundHandler.(*negroni.Negroni) - - found := false - for _, handler := range handler.Handlers() { - if reflect.TypeOf(handler) == reflect.TypeOf((*middlewares.IPWhiteLister)(nil)) { - found = true - } - } - - if found && !test.expectMiddleware { - t.Error("ip whitelist middleware was installed even though it should not") - } - - if !found && test.expectMiddleware { - t.Error("ip whitelist middleware was not installed even though it should have") - } - }) - } -} - -func TestBuildIPWhiteLister(t *testing.T) { - testCases := []struct { - desc string - whitelistSourceRange []string - whiteList *types.WhiteList - middlewareConfigured 
bool - errMessage string - }{ - { - desc: "no whitelists configured", - whitelistSourceRange: nil, - middlewareConfigured: false, - errMessage: "", - }, - { - desc: "whitelists configured", - whiteList: &types.WhiteList{ - SourceRange: []string{ - "1.2.3.4/24", - "fe80::/16", - }, - }, - middlewareConfigured: true, - errMessage: "", - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - middleware, err := buildIPWhiteLister(test.whiteList, nil) - - if test.errMessage != "" { - require.EqualError(t, err, test.errMessage) - } else { - assert.NoError(t, err) - - if test.middlewareConfigured { - require.NotNil(t, middleware, "not expected middleware to be configured") - } else { - require.Nil(t, middleware, "expected middleware to be configured") - } - } - }) - } -} - -func TestBuildRedirectHandler(t *testing.T) { - srv := Server{ - globalConfiguration: configuration.GlobalConfiguration{}, - entryPoints: map[string]EntryPoint{ - "http": {Configuration: &configuration.EntryPoint{Address: ":80"}}, - "https": {Configuration: &configuration.EntryPoint{Address: ":443", TLS: &tls.TLS{}}}, - }, - } - - testCases := []struct { - desc string - srcEntryPointName string - url string - entryPoint *configuration.EntryPoint - redirect *types.Redirect - expectedURL string - }{ - { - desc: "redirect regex", - srcEntryPointName: "http", - url: "http://foo.com", - redirect: &types.Redirect{ - Regex: `^(?:http?:\/\/)(foo)(\.com)$`, - Replacement: "https://$1{{\"bar\"}}$2", - }, - entryPoint: &configuration.EntryPoint{ - Address: ":80", - Redirect: &types.Redirect{ - Regex: `^(?:http?:\/\/)(foo)(\.com)$`, - Replacement: "https://$1{{\"bar\"}}$2", - }, - }, - expectedURL: "https://foobar.com", - }, - { - desc: "redirect entry point", - srcEntryPointName: "http", - url: "http://foo:80", - redirect: &types.Redirect{ - EntryPoint: "https", - }, - entryPoint: &configuration.EntryPoint{ - Address: ":80", - Redirect: &types.Redirect{ - EntryPoint: "https", - }, - }, - expectedURL: "https://foo:443", - }, - { - desc: "redirect entry point with regex (ignored)", - srcEntryPointName: "http", - url: "http://foo.com:80", - redirect: &types.Redirect{ - EntryPoint: "https", - Regex: `^(?:http?:\/\/)(foo)(\.com)$`, - Replacement: "https://$1{{\"bar\"}}$2", - }, - entryPoint: &configuration.EntryPoint{ - Address: ":80", - Redirect: &types.Redirect{ - EntryPoint: "https", - Regex: `^(?:http?:\/\/)(foo)(\.com)$`, - Replacement: "https://$1{{\"bar\"}}$2", - }, - }, - expectedURL: "https://foo.com:443", - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - rewrite, err := srv.buildRedirectHandler(test.srcEntryPointName, test.redirect) - require.NoError(t, err) - - req := th.MustNewRequest(http.MethodGet, test.url, nil) - recorder := httptest.NewRecorder() - - rewrite.ServeHTTP(recorder, req, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Add("Location", "fail") - })) - - location, err := recorder.Result().Location() - require.NoError(t, err) - assert.Equal(t, test.expectedURL, location.String()) - }) - } -} - -func TestServerGenericFrontendAuthFail(t *testing.T) { - globalConfig := configuration.GlobalConfiguration{ - EntryPoints: configuration.EntryPoints{ - "http": &configuration.EntryPoint{ForwardedHeaders: &configuration.ForwardedHeaders{Insecure: true}}, - }, - } - entryPoints := map[string]EntryPoint{ - "http": { - Configuration: globalConfig.EntryPoints["http"], - }, 
- } - - dynamicConfigs := types.Configurations{ - "config": &types.Configuration{ - Frontends: map[string]*types.Frontend{ - "frontend": { - EntryPoints: []string{"http"}, - Backend: "backend", - Auth: &types.Auth{ - Basic: &types.Basic{ - Users: []string{""}, - }}, - }, - }, - Backends: map[string]*types.Backend{ - "backend": { - Servers: map[string]types.Server{ - "server": { - URL: "http://localhost", - }, - }, - LoadBalancer: &types.LoadBalancer{ - Method: "Wrr", - }, - }, - }, - }, - } - - srv := NewServer(globalConfig, nil, entryPoints) - - _ = srv.loadConfig(dynamicConfigs, globalConfig) -} diff --git a/server/server_signals.go b/server/server_signals.go index 8472696d7..5badcff24 100644 --- a/server/server_signals.go +++ b/server/server_signals.go @@ -21,16 +21,16 @@ func (s *Server) listenSignals(stop chan bool) { case sig := <-s.signals: switch sig { case syscall.SIGUSR1: - log.Infof("Closing and re-opening log files for rotation: %+v", sig) + log.WithoutContext().Infof("Closing and re-opening log files for rotation: %+v", sig) if s.accessLoggerMiddleware != nil { if err := s.accessLoggerMiddleware.Rotate(); err != nil { - log.Errorf("Error rotating access log: %v", err) + log.WithoutContext().Errorf("Error rotating access log: %v", err) } } if err := log.RotateFile(); err != nil { - log.Errorf("Error rotating traefik log: %v", err) + log.WithoutContext().Errorf("Error rotating traefik log: %v", err) } } } diff --git a/server/server_test.go b/server/server_test.go index 512c5ee35..811181833 100644 --- a/server/server_test.go +++ b/server/server_test.go @@ -9,13 +9,12 @@ import ( "github.com/containous/flaeg/parse" "github.com/containous/mux" - "github.com/containous/traefik/configuration" + "github.com/containous/traefik/config" "github.com/containous/traefik/middlewares" + "github.com/containous/traefik/old/configuration" th "github.com/containous/traefik/testhelpers" - "github.com/containous/traefik/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/unrolled/secure" ) func TestPrepareServerTimeouts(t *testing.T) { @@ -62,7 +61,7 @@ func TestPrepareServerTimeouts(t *testing.T) { router := middlewares.NewHandlerSwitcher(mux.NewRouter()) srv := NewServer(test.globalConfig, nil, nil) - httpServer, _, err := srv.prepareServer(entryPointName, entryPoint, router, nil) + httpServer, _, err := srv.prepareServer(context.Background(), entryPointName, entryPoint, router) require.NoError(t, err, "Unexpected error when preparing srv") assert.Equal(t, test.expectedIdleTimeout, httpServer.IdleTimeout, "IdleTimeout") @@ -87,7 +86,7 @@ func TestListenProvidersSkipsEmptyConfigs(t *testing.T) { } }() - server.configurationChan <- types.ConfigMessage{ProviderName: "kubernetes"} + server.configurationChan <- config.Message{ProviderName: "kubernetes"} // give some time so that the configuration can be processed time.Sleep(100 * time.Millisecond) @@ -103,12 +102,12 @@ func TestListenProvidersSkipsSameConfigurationForProvider(t *testing.T) { select { case <-stop: return - case config := <-server.configurationValidatedChan: + case conf := <-server.configurationValidatedChan: // set the current configuration // this is usually done in the processing part of the published configuration // so we have to emulate the behavior here - currentConfigurations := server.currentConfigurations.Get().(types.Configurations) - currentConfigurations[config.ProviderName] = config.Configuration + currentConfigurations := 
server.currentConfigurations.Get().(config.Configurations) + currentConfigurations[conf.ProviderName] = conf.Configuration server.currentConfigurations.Set(currentConfigurations) publishedConfigCount++ @@ -119,19 +118,19 @@ func TestListenProvidersSkipsSameConfigurationForProvider(t *testing.T) { } }() - config := th.BuildConfiguration( - th.WithFrontends(th.WithFrontend("backend")), - th.WithBackends(th.WithBackendNew("backend")), + conf := th.BuildConfiguration( + th.WithRouters(th.WithRouter("foo")), + th.WithLoadBalancerServices(th.WithService("bar")), ) // provide a configuration - server.configurationChan <- types.ConfigMessage{ProviderName: "kubernetes", Configuration: config} + server.configurationChan <- config.Message{ProviderName: "kubernetes", Configuration: conf} // give some time so that the configuration can be processed time.Sleep(20 * time.Millisecond) // provide the same configuration a second time - server.configurationChan <- types.ConfigMessage{ProviderName: "kubernetes", Configuration: config} + server.configurationChan <- config.Message{ProviderName: "kubernetes", Configuration: conf} // give some time so that the configuration can be processed time.Sleep(100 * time.Millisecond) @@ -160,12 +159,12 @@ func TestListenProvidersPublishesConfigForEachProvider(t *testing.T) { } }() - config := th.BuildConfiguration( - th.WithFrontends(th.WithFrontend("backend")), - th.WithBackends(th.WithBackendNew("backend")), + conf := th.BuildConfiguration( + th.WithRouters(th.WithRouter("foo")), + th.WithLoadBalancerServices(th.WithService("bar")), ) - server.configurationChan <- types.ConfigMessage{ProviderName: "kubernetes", Configuration: config} - server.configurationChan <- types.ConfigMessage{ProviderName: "marathon", Configuration: config} + server.configurationChan <- config.Message{ProviderName: "kubernetes", Configuration: conf} + server.configurationChan <- config.Message{ProviderName: "marathon", Configuration: conf} select { case <-consumePublishedConfigsDone: @@ -206,20 +205,21 @@ func TestServerResponseEmptyBackend(t *testing.T) { testCases := []struct { desc string - config func(testServerURL string) *types.Configuration + config func(testServerURL string) *config.Configuration expectedStatusCode int }{ { desc: "Ok", - config: func(testServerURL string) *types.Configuration { + config: func(testServerURL string) *config.Configuration { return th.BuildConfiguration( - th.WithFrontends(th.WithFrontend("backend", + th.WithRouters(th.WithRouter("foo", th.WithEntryPoints("http"), - th.WithRoutes(th.WithRoute(requestPath, routeRule))), + th.WithServiceName("bar"), + th.WithRule(routeRule)), ), - th.WithBackends(th.WithBackendNew("backend", + th.WithLoadBalancerServices(th.WithService("bar", th.WithLBMethod("wrr"), - th.WithServersNew(th.WithServerNew(testServerURL))), + th.WithServers(th.WithServer(testServerURL))), ), ) }, @@ -227,20 +227,21 @@ func TestServerResponseEmptyBackend(t *testing.T) { }, { desc: "No Frontend", - config: func(testServerURL string) *types.Configuration { + config: func(testServerURL string) *config.Configuration { return th.BuildConfiguration() }, expectedStatusCode: http.StatusNotFound, }, { desc: "Empty Backend LB-Drr", - config: func(testServerURL string) *types.Configuration { + config: func(testServerURL string) *config.Configuration { return th.BuildConfiguration( - th.WithFrontends(th.WithFrontend("backend", + th.WithRouters(th.WithRouter("foo", th.WithEntryPoints("http"), - th.WithRoutes(th.WithRoute(requestPath, routeRule))), + 
th.WithServiceName("bar"), + th.WithRule(routeRule)), ), - th.WithBackends(th.WithBackendNew("backend", + th.WithLoadBalancerServices(th.WithService("bar", th.WithLBMethod("drr")), ), ) @@ -249,14 +250,15 @@ func TestServerResponseEmptyBackend(t *testing.T) { }, { desc: "Empty Backend LB-Drr Sticky", - config: func(testServerURL string) *types.Configuration { + config: func(testServerURL string) *config.Configuration { return th.BuildConfiguration( - th.WithFrontends(th.WithFrontend("backend", + th.WithRouters(th.WithRouter("foo", th.WithEntryPoints("http"), - th.WithRoutes(th.WithRoute(requestPath, routeRule))), + th.WithServiceName("bar"), + th.WithRule(routeRule)), ), - th.WithBackends(th.WithBackendNew("backend", - th.WithLBMethod("drr"), th.WithLBSticky("test")), + th.WithLoadBalancerServices(th.WithService("bar", + th.WithLBMethod("drr"), th.WithStickiness("test")), ), ) }, @@ -264,13 +266,14 @@ func TestServerResponseEmptyBackend(t *testing.T) { }, { desc: "Empty Backend LB-Wrr", - config: func(testServerURL string) *types.Configuration { + config: func(testServerURL string) *config.Configuration { return th.BuildConfiguration( - th.WithFrontends(th.WithFrontend("backend", + th.WithRouters(th.WithRouter("foo", th.WithEntryPoints("http"), - th.WithRoutes(th.WithRoute(requestPath, routeRule))), + th.WithServiceName("bar"), + th.WithRule(routeRule)), ), - th.WithBackends(th.WithBackendNew("backend", + th.WithLoadBalancerServices(th.WithService("bar", th.WithLBMethod("wrr")), ), ) @@ -279,14 +282,15 @@ func TestServerResponseEmptyBackend(t *testing.T) { }, { desc: "Empty Backend LB-Wrr Sticky", - config: func(testServerURL string) *types.Configuration { + config: func(testServerURL string) *config.Configuration { return th.BuildConfiguration( - th.WithFrontends(th.WithFrontend("backend", + th.WithRouters(th.WithRouter("foo", th.WithEntryPoints("http"), - th.WithRoutes(th.WithRoute(requestPath, routeRule))), + th.WithServiceName("bar"), + th.WithRule(routeRule)), ), - th.WithBackends(th.WithBackendNew("backend", - th.WithLBMethod("wrr"), th.WithLBSticky("test")), + th.WithLoadBalancerServices(th.WithService("bar", + th.WithLBMethod("wrr"), th.WithStickiness("test")), ), ) }, @@ -309,134 +313,17 @@ func TestServerResponseEmptyBackend(t *testing.T) { entryPointsConfig := map[string]EntryPoint{ "http": {Configuration: &configuration.EntryPoint{ForwardedHeaders: &configuration.ForwardedHeaders{Insecure: true}}}, } - dynamicConfigs := types.Configurations{"config": test.config(testServer.URL)} + dynamicConfigs := config.Configurations{"config": test.config(testServer.URL)} srv := NewServer(globalConfig, nil, entryPointsConfig) - entryPoints := srv.loadConfig(dynamicConfigs, globalConfig) + entryPoints, _ := srv.loadConfig(dynamicConfigs, globalConfig) responseRecorder := &httptest.ResponseRecorder{} request := httptest.NewRequest(http.MethodGet, testServer.URL+requestPath, nil) - entryPoints["http"].httpRouter.ServeHTTP(responseRecorder, request) + entryPoints["http"].ServeHTTP(responseRecorder, request) assert.Equal(t, test.expectedStatusCode, responseRecorder.Result().StatusCode, "status code") }) } } - -type mockContext struct { - headers http.Header -} - -func (c mockContext) Deadline() (deadline time.Time, ok bool) { - return deadline, ok -} - -func (c mockContext) Done() <-chan struct{} { - ch := make(chan struct{}) - close(ch) - return ch -} - -func (c mockContext) Err() error { - return context.DeadlineExceeded -} - -func (c mockContext) Value(key interface{}) interface{} { - return 
c.headers -} - -func TestNewServerWithResponseModifiers(t *testing.T) { - testCases := []struct { - desc string - headerMiddleware *middlewares.HeaderStruct - secureMiddleware *secure.Secure - ctx context.Context - expected map[string]string - }{ - { - desc: "header and secure nil", - headerMiddleware: nil, - secureMiddleware: nil, - ctx: mockContext{}, - expected: map[string]string{ - "X-Default": "powpow", - "Referrer-Policy": "same-origin", - }, - }, - { - desc: "header middleware not nil", - headerMiddleware: middlewares.NewHeaderFromStruct(&types.Headers{ - CustomResponseHeaders: map[string]string{ - "X-Default": "powpow", - }, - }), - secureMiddleware: nil, - ctx: mockContext{}, - expected: map[string]string{ - "X-Default": "powpow", - "Referrer-Policy": "same-origin", - }, - }, - { - desc: "secure middleware not nil", - headerMiddleware: nil, - secureMiddleware: middlewares.NewSecure(&types.Headers{ - ReferrerPolicy: "no-referrer", - }), - ctx: mockContext{ - headers: http.Header{"Referrer-Policy": []string{"no-referrer"}}, - }, - expected: map[string]string{ - "X-Default": "powpow", - "Referrer-Policy": "no-referrer", - }, - }, - { - desc: "header and secure middleware not nil", - headerMiddleware: middlewares.NewHeaderFromStruct(&types.Headers{ - CustomResponseHeaders: map[string]string{ - "Referrer-Policy": "powpow", - }, - }), - secureMiddleware: middlewares.NewSecure(&types.Headers{ - ReferrerPolicy: "no-referrer", - }), - ctx: mockContext{ - headers: http.Header{"Referrer-Policy": []string{"no-referrer"}}, - }, - expected: map[string]string{ - "X-Default": "powpow", - "Referrer-Policy": "powpow", - }, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - headers := make(http.Header) - headers.Add("X-Default", "powpow") - headers.Add("Referrer-Policy", "same-origin") - - req := httptest.NewRequest(http.MethodGet, "http://127.0.0.1", nil) - - res := &http.Response{ - Request: req.WithContext(test.ctx), - Header: headers, - } - - responseModifier := buildModifyResponse(test.secureMiddleware, test.headerMiddleware) - err := responseModifier(res) - - assert.NoError(t, err) - assert.Equal(t, len(test.expected), len(res.Header)) - - for k, v := range test.expected { - assert.Equal(t, v, res.Header.Get(k)) - } - }) - } -} diff --git a/server/bufferpool.go b/server/service/bufferpool.go similarity index 96% rename from server/bufferpool.go rename to server/service/bufferpool.go index 6cd194830..948611502 100644 --- a/server/bufferpool.go +++ b/server/service/bufferpool.go @@ -1,4 +1,4 @@ -package server +package service import "sync" diff --git a/server/service/service.go b/server/service/service.go new file mode 100644 index 000000000..c053a7152 --- /dev/null +++ b/server/service/service.go @@ -0,0 +1,280 @@ +package service + +import ( + "context" + "fmt" + "net" + "net/http" + "net/http/httputil" + "net/url" + "time" + + "github.com/containous/flaeg/parse" + "github.com/containous/traefik/config" + "github.com/containous/traefik/healthcheck" + "github.com/containous/traefik/log" + "github.com/containous/traefik/middlewares/emptybackendhandler" + "github.com/containous/traefik/old/middlewares/pipelining" + "github.com/containous/traefik/server/cookie" + "github.com/vulcand/oxy/forward" + "github.com/vulcand/oxy/roundrobin" +) + +const ( + defaultHealthCheckInterval = 30 * time.Second + defaultHealthCheckTimeout = 5 * time.Second +) + +// See oxy/roundrobin/rr.go +type balancerHandler interface { + Servers() []*url.URL 
+    ServeHTTP(w http.ResponseWriter, req *http.Request)
+    ServerWeight(u *url.URL) (int, bool)
+    RemoveServer(u *url.URL) error
+    UpsertServer(u *url.URL, options ...roundrobin.ServerOption) error
+    NextServer() (*url.URL, error)
+    Next() http.Handler
+}
+
+// NewManager creates a new Manager
+func NewManager(configs map[string]*config.Service, defaultRoundTripper http.RoundTripper) *Manager {
+    return &Manager{
+        bufferPool: newBufferPool(),
+        defaultRoundTripper: defaultRoundTripper,
+        balancers: make(map[string][]healthcheck.BalancerHandler),
+        configs: configs,
+    }
+}
+
+// Manager The service manager
+type Manager struct {
+    bufferPool httputil.BufferPool
+    defaultRoundTripper http.RoundTripper
+    balancers map[string][]healthcheck.BalancerHandler
+    configs map[string]*config.Service
+}
+
+// Build creates an http.Handler for a service configuration.
+func (m *Manager) Build(rootCtx context.Context, serviceName string, responseModifier func(*http.Response) error) (http.Handler, error) {
+    ctx := log.With(rootCtx, log.Str(log.ServiceName, serviceName))
+
+    // TODO refactor ?
+    if conf, ok := m.configs[serviceName]; ok {
+        // FIXME Should handle multiple service types
+        return m.getLoadBalancerServiceHandler(ctx, serviceName, conf.LoadBalancer, responseModifier)
+    }
+    return nil, fmt.Errorf("the service %q does not exist", serviceName)
+}
+
+func (m *Manager) getLoadBalancerServiceHandler(
+    ctx context.Context,
+    serviceName string,
+    service *config.LoadBalancerService,
+    responseModifier func(*http.Response) error,
+) (http.Handler, error) {
+
+    fwd, err := m.buildForwarder(service.PassHostHeader, service.ResponseForwarding, responseModifier)
+    if err != nil {
+        return nil, err
+    }
+
+    fwd = pipelining.NewPipelining(fwd)
+
+    rr, err := roundrobin.New(fwd)
+    if err != nil {
+        return nil, err
+    }
+
+    balancer, err := m.getLoadBalancer(ctx, serviceName, service, fwd, rr)
+    if err != nil {
+        return nil, err
+    }
+
+    // TODO rename and checks
+    m.balancers[serviceName] = append(m.balancers[serviceName], balancer)
+
+    // Empty (backend with no servers)
+    return emptybackendhandler.New(balancer), nil
+}
+
+// LaunchHealthCheck Launches the health checks.
+func (m *Manager) LaunchHealthCheck() {
+    backendConfigs := make(map[string]*healthcheck.BackendConfig)
+
+    for serviceName, balancers := range m.balancers {
+        ctx := log.With(context.Background(), log.Str(log.ServiceName, serviceName))
+
+        // FIXME aggregate
+        balancer := balancers[0]
+
+        // FIXME Should all the services handle healthcheck? Handle different types
+        service := m.configs[serviceName].LoadBalancer
+
+        // Health Check
+        var backendHealthCheck *healthcheck.BackendConfig
+        if hcOpts := buildHealthCheckOptions(ctx, balancer, serviceName, service.HealthCheck); hcOpts != nil {
+            log.FromContext(ctx).Debugf("Setting up healthcheck for service %s with %s", serviceName, *hcOpts)
+
+            hcOpts.Transport = m.defaultRoundTripper
+            backendHealthCheck = healthcheck.NewBackendConfig(*hcOpts, serviceName)
+        }
+
+        if backendHealthCheck != nil {
+            backendConfigs[serviceName] = backendHealthCheck
+        }
+    }
+
+    // FIXME metrics and context
+    healthcheck.GetHealthCheck().SetBackendsConfiguration(context.TODO(), backendConfigs)
+}
+
+func buildHealthCheckOptions(ctx context.Context, lb healthcheck.BalancerHandler, backend string, hc *config.HealthCheck) *healthcheck.Options {
+    if hc == nil || hc.Path == "" {
+        return nil
+    }
+
+    logger := log.FromContext(ctx)
+
+    interval := defaultHealthCheckInterval
+    if hc.Interval != "" {
+        intervalOverride, err := time.ParseDuration(hc.Interval)
+        if err != nil {
+            logger.Errorf("Illegal health check interval for '%s': %s", backend, err)
+        } else if intervalOverride <= 0 {
+            logger.Errorf("Health check interval smaller than zero for service '%s'", backend)
+        } else {
+            interval = intervalOverride
+        }
+    }
+
+    timeout := defaultHealthCheckTimeout
+    if hc.Timeout != "" {
+        timeoutOverride, err := time.ParseDuration(hc.Timeout)
+        if err != nil {
+            logger.Errorf("Illegal health check timeout for backend '%s': %s", backend, err)
+        } else if timeoutOverride <= 0 {
+            logger.Errorf("Health check timeout smaller than zero for backend '%s'", backend)
+        } else {
+            timeout = timeoutOverride
+        }
+    }
+
+    if timeout >= interval {
+        logger.Warnf("Health check timeout for backend '%s' should be lower than the health check interval. Interval set to timeout + 1 second (%s).", backend, interval)
+    }
+
+    return &healthcheck.Options{
+        Scheme: hc.Scheme,
+        Path: hc.Path,
+        Port: hc.Port,
+        Interval: interval,
+        Timeout: timeout,
+        LB: lb,
+        Hostname: hc.Hostname,
+        Headers: hc.Headers,
+    }
+}
+
+func (m *Manager) getLoadBalancer(ctx context.Context, serviceName string, service *config.LoadBalancerService, fwd http.Handler, rr balancerHandler) (healthcheck.BalancerHandler, error) {
+    logger := log.FromContext(ctx)
+
+    var stickySession *roundrobin.StickySession
+    var cookieName string
+    if stickiness := service.Stickiness; stickiness != nil {
+        cookieName = cookie.GetName(stickiness.CookieName, serviceName)
+        stickySession = roundrobin.NewStickySession(cookieName)
+    }
+
+    var lb healthcheck.BalancerHandler
+    var err error
+
+    if service.Method == "drr" {
+        logger.Debug("Creating drr load-balancer")
+
+        if stickySession != nil {
+            logger.Debugf("Sticky session cookie name: %v", cookieName)
+
+            lb, err = roundrobin.NewRebalancer(rr, roundrobin.RebalancerStickySession(stickySession))
+            if err != nil {
+                return nil, err
+            }
+        } else {
+            lb, err = roundrobin.NewRebalancer(rr)
+            if err != nil {
+                return nil, err
+            }
+        }
+    } else {
+        if service.Method != "wrr" {
+            logger.Warnf("Invalid load-balancing method %q, fallback to 'wrr' method", service.Method)
+        }
+
+        logger.Debug("Creating wrr load-balancer")
+
+        if stickySession != nil {
+            logger.Debugf("Sticky session cookie name: %v", cookieName)
+
+            lb, err = roundrobin.New(fwd, roundrobin.EnableStickySession(stickySession))
+            if err != nil {
+                return nil, err
+            }
+        } else {
+            lb = rr
+        }
+    }
+
+    if err := m.upsertServers(ctx, lb, service.Servers); err != nil {
+        return nil, fmt.Errorf("error configuring load balancer for service %s: %v", serviceName, err)
+    }
+
+    return lb, nil
+}
+
+func (m *Manager) upsertServers(ctx context.Context, lb healthcheck.BalancerHandler, servers []config.Server) error {
+    logger := log.FromContext(ctx)
+
+    for name, srv := range servers {
+        u, err := url.Parse(srv.URL)
+        if err != nil {
+            return fmt.Errorf("error parsing server URL %s: %v", srv.URL, err)
+        }
+
+        logger.WithField(log.ServerName, name).Debugf("Creating server %d at %s with weight %d", name, u, srv.Weight)
+
+        if err := lb.UpsertServer(u, roundrobin.Weight(srv.Weight)); err != nil {
+            return fmt.Errorf("error adding server %s to load balancer: %v", srv.URL, err)
+        }
+
+        // FIXME Handle Metrics
+    }
+    return nil
+}
+
+func (m *Manager) buildForwarder(passHostHeader bool, responseForwarding *config.ResponseForwarding, responseModifier func(*http.Response) error) (http.Handler, error) {
+
+    var flushInterval parse.Duration
+    if responseForwarding != nil {
+        err := flushInterval.Set(responseForwarding.FlushInterval)
+        if err != nil {
+            return nil, fmt.Errorf("error creating flush interval: %v", err)
+        }
+    }
+
+    return forward.New(
+        forward.Stream(true),
+        forward.PassHostHeader(passHostHeader),
+        forward.RoundTripper(m.defaultRoundTripper),
+        forward.ResponseModifier(responseModifier),
+        forward.BufferPool(m.bufferPool),
+        forward.StreamingFlushInterval(time.Duration(flushInterval)),
+        forward.WebsocketConnectionClosedHook(func(req *http.Request, conn net.Conn) {
+            server := req.Context().Value(http.ServerContextKey).(*http.Server)
+            if server != nil {
+                connState := server.ConnState
+                if connState != nil {
+                    connState(conn, http.StateClosed)
+                }
+            }
+        }),
+    )
+}
diff --git a/server/service/service_test.go b/server/service/service_test.go
new file mode 100644
index 000000000..5bcb1d3b5
--- /dev/null
+++ b/server/service/service_test.go @@ -0,0 +1,327 @@ +package service + +import ( + "context" + "errors" + "net/http" + "net/http/httptest" + "net/url" + "testing" + + "github.com/containous/traefik/config" + "github.com/containous/traefik/testhelpers" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/vulcand/oxy/roundrobin" +) + +type MockRR struct { + err error +} + +func (*MockRR) Servers() []*url.URL { + panic("implement me") +} + +func (*MockRR) ServeHTTP(w http.ResponseWriter, req *http.Request) { + panic("implement me") +} + +func (*MockRR) ServerWeight(u *url.URL) (int, bool) { + panic("implement me") +} + +func (*MockRR) RemoveServer(u *url.URL) error { + panic("implement me") +} + +func (m *MockRR) UpsertServer(u *url.URL, options ...roundrobin.ServerOption) error { + return m.err +} + +func (*MockRR) NextServer() (*url.URL, error) { + panic("implement me") +} + +func (*MockRR) Next() http.Handler { + panic("implement me") +} + +type MockForwarder struct{} + +func (MockForwarder) ServeHTTP(http.ResponseWriter, *http.Request) { + panic("implement me") +} + +func TestGetLoadBalancer(t *testing.T) { + sm := Manager{} + + testCases := []struct { + desc string + serviceName string + service *config.LoadBalancerService + fwd http.Handler + rr balancerHandler + expectError bool + }{ + { + desc: "Fails when provided an invalid URL", + serviceName: "test", + service: &config.LoadBalancerService{ + Servers: []config.Server{ + { + URL: ":", + Weight: 0, + }, + }, + }, + fwd: &MockForwarder{}, + rr: &MockRR{}, + expectError: true, + }, + { + desc: "Fails when the server upsert fails", + serviceName: "test", + service: &config.LoadBalancerService{ + Servers: []config.Server{ + { + URL: "http://foo", + Weight: 0, + }, + }, + }, + fwd: &MockForwarder{}, + rr: &MockRR{err: errors.New("upsert fails")}, + expectError: true, + }, + { + desc: "Succeeds when there are no servers", + serviceName: "test", + service: &config.LoadBalancerService{}, + fwd: &MockForwarder{}, + rr: &MockRR{}, + expectError: false, + }, + { + desc: "Succeeds when stickiness is set", + serviceName: "test", + service: &config.LoadBalancerService{ + Stickiness: &config.Stickiness{}, + }, + fwd: &MockForwarder{}, + rr: &MockRR{}, + expectError: false, + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + handler, err := sm.getLoadBalancer(context.Background(), test.serviceName, test.service, test.fwd, test.rr) + if test.expectError { + require.Error(t, err) + assert.Nil(t, handler) + } else { + require.NoError(t, err) + assert.NotNil(t, handler) + } + }) + } +} + +func TestGetLoadBalancerServiceHandler(t *testing.T) { + sm := NewManager(nil, http.DefaultTransport) + + server1 := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("X-From", "first") + })) + defer server1.Close() + + server2 := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("X-From", "second") + })) + defer server2.Close() + + serverPassHost := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("X-From", "passhost") + assert.Equal(t, "callme", r.Host) + })) + defer serverPassHost.Close() + + serverPassHostFalse := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("X-From", "passhostfalse") + assert.NotEqual(t, "callme", r.Host) + })) + defer 
serverPassHostFalse.Close() + + type ExpectedResult struct { + StatusCode int + XFrom string + } + + testCases := []struct { + desc string + serviceName string + service *config.LoadBalancerService + responseModifier func(*http.Response) error + + expected []ExpectedResult + }{ + { + desc: "Load balances between the two servers", + serviceName: "test", + service: &config.LoadBalancerService{ + Servers: []config.Server{ + { + URL: server1.URL, + Weight: 50, + }, + { + URL: server2.URL, + Weight: 50, + }, + }, + Method: "wrr", + }, + expected: []ExpectedResult{ + { + StatusCode: http.StatusOK, + XFrom: "first", + }, + { + StatusCode: http.StatusOK, + XFrom: "second", + }, + }, + }, + { + desc: "StatusBadGateway when the server is not reachable", + serviceName: "test", + service: &config.LoadBalancerService{ + Servers: []config.Server{ + { + URL: "http://foo", + Weight: 1, + }, + }, + Method: "wrr", + }, + expected: []ExpectedResult{ + { + StatusCode: http.StatusBadGateway, + }, + }, + }, + { + desc: "ServiceUnavailable when no servers are available", + serviceName: "test", + service: &config.LoadBalancerService{ + Servers: []config.Server{}, + Method: "wrr", + }, + expected: []ExpectedResult{ + { + StatusCode: http.StatusServiceUnavailable, + }, + }, + }, + { + desc: "Always call the same server when stickiness is true", + serviceName: "test", + service: &config.LoadBalancerService{ + Stickiness: &config.Stickiness{}, + Servers: []config.Server{ + { + URL: server1.URL, + Weight: 1, + }, + { + URL: server2.URL, + Weight: 1, + }, + }, + Method: "wrr", + }, + expected: []ExpectedResult{ + { + StatusCode: http.StatusOK, + XFrom: "first", + }, + { + StatusCode: http.StatusOK, + XFrom: "first", + }, + }, + }, + { + desc: "PassHost passes the host instead of the IP", + serviceName: "test", + service: &config.LoadBalancerService{ + Stickiness: &config.Stickiness{}, + PassHostHeader: true, + Servers: []config.Server{ + { + URL: serverPassHost.URL, + Weight: 1, + }, + }, + Method: "wrr", + }, + expected: []ExpectedResult{ + { + StatusCode: http.StatusOK, + XFrom: "passhost", + }, + }, + }, + { + desc: "PassHost doesn't passe the host instead of the IP", + serviceName: "test", + service: &config.LoadBalancerService{ + Stickiness: &config.Stickiness{}, + Servers: []config.Server{ + { + URL: serverPassHostFalse.URL, + Weight: 1, + }, + }, + Method: "wrr", + }, + expected: []ExpectedResult{ + { + StatusCode: http.StatusOK, + XFrom: "passhostfalse", + }, + }, + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + + handler, err := sm.getLoadBalancerServiceHandler(context.Background(), test.serviceName, test.service, test.responseModifier) + + assert.NoError(t, err) + assert.NotNil(t, handler) + + req := testhelpers.MustNewRequest(http.MethodGet, "http://callme", nil) + for _, expected := range test.expected { + recorder := httptest.NewRecorder() + + handler.ServeHTTP(recorder, req) + + assert.Equal(t, expected.StatusCode, recorder.Code) + assert.Equal(t, expected.XFrom, recorder.Header().Get("X-From")) + + if len(recorder.Header().Get("Set-Cookie")) > 0 { + req.Header.Set("Cookie", recorder.Header().Get("Set-Cookie")) + } + } + }) + } +} + +// FIXME Add healthcheck tests diff --git a/testhelpers/config.go b/testhelpers/config.go index 4b2dd79da..85d04a649 100644 --- a/testhelpers/config.go +++ b/testhelpers/config.go @@ -1,155 +1,154 @@ package testhelpers import ( - "github.com/containous/traefik/provider" - "github.com/containous/traefik/types" + 
"github.com/containous/traefik/config" ) // BuildConfiguration is a helper to create a configuration. -func BuildConfiguration(dynamicConfigBuilders ...func(*types.Configuration)) *types.Configuration { - config := &types.Configuration{} +func BuildConfiguration(dynamicConfigBuilders ...func(*config.Configuration)) *config.Configuration { + conf := &config.Configuration{} for _, build := range dynamicConfigBuilders { - build(config) + build(conf) } - return config + return conf } -// -- Backend - -// WithBackends is a helper to create a configuration -func WithBackends(opts ...func(*types.Backend) string) func(*types.Configuration) { - return func(c *types.Configuration) { - c.Backends = make(map[string]*types.Backend) +// WithRouters is a helper to create a configuration. +func WithRouters(opts ...func(*config.Router) string) func(*config.Configuration) { + return func(c *config.Configuration) { + c.Routers = make(map[string]*config.Router) for _, opt := range opts { - b := &types.Backend{} + b := &config.Router{} name := opt(b) - c.Backends[name] = b + c.Routers[name] = b } } } -// WithBackendNew is a helper to create a configuration -func WithBackendNew(name string, opts ...func(*types.Backend)) func(*types.Backend) string { - return func(b *types.Backend) string { +// WithRouter is a helper to create a configuration. +func WithRouter(routerName string, opts ...func(*config.Router)) func(*config.Router) string { + return func(r *config.Router) string { for _, opt := range opts { - opt(b) + opt(r) + } + return routerName + } +} + +// WithRouterMiddlewares is a helper to create a configuration. +func WithRouterMiddlewares(middlewaresName ...string) func(*config.Router) { + return func(r *config.Router) { + r.Middlewares = middlewaresName + } +} + +// WithServiceName is a helper to create a configuration. +func WithServiceName(serviceName string) func(*config.Router) { + return func(r *config.Router) { + r.Service = serviceName + } +} + +// WithLoadBalancerServices is a helper to create a configuration. +func WithLoadBalancerServices(opts ...func(service *config.LoadBalancerService) string) func(*config.Configuration) { + return func(c *config.Configuration) { + c.Services = make(map[string]*config.Service) + for _, opt := range opts { + b := &config.LoadBalancerService{} + name := opt(b) + c.Services[name] = &config.Service{ + LoadBalancer: b, + } + } + } +} + +// WithService is a helper to create a configuration. +func WithService(name string, opts ...func(*config.LoadBalancerService)) func(*config.LoadBalancerService) string { + return func(r *config.LoadBalancerService) string { + for _, opt := range opts { + opt(r) } return name } } -// WithServersNew is a helper to create a configuration -func WithServersNew(opts ...func(*types.Server) string) func(*types.Backend) { - return func(b *types.Backend) { - b.Servers = make(map[string]types.Server) +// WithMiddlewares is a helper to create a configuration. 
+func WithMiddlewares(opts ...func(*config.Middleware) string) func(*config.Configuration) { + return func(c *config.Configuration) { + c.Middlewares = make(map[string]*config.Middleware) for _, opt := range opts { - s := &types.Server{Weight: 1} - name := opt(s) - b.Servers[name] = *s + b := &config.Middleware{} + name := opt(b) + c.Middlewares[name] = b } } } -// WithServerNew is a helper to create a configuration -func WithServerNew(url string, opts ...func(*types.Server)) func(*types.Server) string { - return func(s *types.Server) string { +// WithMiddleware is a helper to create a configuration. +func WithMiddleware(name string, opts ...func(*config.Middleware)) func(*config.Middleware) string { + return func(r *config.Middleware) string { for _, opt := range opts { - opt(s) + opt(r) } - s.URL = url - return provider.Normalize(url) + return name } } -// WithLBMethod is a helper to create a configuration -func WithLBMethod(method string) func(*types.Backend) { - return func(b *types.Backend) { - if b.LoadBalancer == nil { - b.LoadBalancer = &types.LoadBalancer{} - } - b.LoadBalancer.Method = method +// WithBasicAuth is a helper to create a configuration. +func WithBasicAuth(auth *config.BasicAuth) func(*config.Middleware) { + return func(r *config.Middleware) { + r.BasicAuth = auth } } -// -- Frontend - -// WithFrontends is a helper to create a configuration -func WithFrontends(opts ...func(*types.Frontend) string) func(*types.Configuration) { - return func(c *types.Configuration) { - c.Frontends = make(map[string]*types.Frontend) - for _, opt := range opts { - f := &types.Frontend{} - name := opt(f) - c.Frontends[name] = f - } - } -} - -// WithFrontend is a helper to create a configuration -func WithFrontend(backend string, opts ...func(*types.Frontend)) func(*types.Frontend) string { - return func(f *types.Frontend) string { - for _, opt := range opts { - opt(f) - } - - // related the function WithFrontendName - name := f.Backend - f.Backend = backend - if len(name) > 0 { - return name - } - return backend - } -} - -// WithFrontendName is a helper to create a configuration -func WithFrontendName(name string) func(*types.Frontend) { - return func(f *types.Frontend) { - // store temporary the frontend name into the backend name - f.Backend = name - } -} - -// WithEntryPoints is a helper to create a configuration -func WithEntryPoints(eps ...string) func(*types.Frontend) { - return func(f *types.Frontend) { +// WithEntryPoints is a helper to create a configuration. +func WithEntryPoints(eps ...string) func(*config.Router) { + return func(f *config.Router) { f.EntryPoints = eps } } -// WithRoutes is a helper to create a configuration -func WithRoutes(opts ...func(*types.Route) string) func(*types.Frontend) { - return func(f *types.Frontend) { - f.Routes = make(map[string]types.Route) +// WithRule is a helper to create a configuration. +func WithRule(rule string) func(*config.Router) { + return func(f *config.Router) { + f.Rule = rule + } +} + +// WithServers is a helper to create a configuration. 
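Taken together, these builders compose into a full dynamic configuration the same way the reworked server tests above use them; WithServers and WithServer below attach server URLs to a load-balancer service. A minimal sketch, where the rule string and the server URL are placeholders rather than values taken from this change:

package main

import (
    th "github.com/containous/traefik/testhelpers"
)

func main() {
    // One router "foo" on entry point "http" pointing to one wrr service "bar".
    conf := th.BuildConfiguration(
        th.WithRouters(th.WithRouter("foo",
            th.WithEntryPoints("http"),
            th.WithServiceName("bar"),
            th.WithRule("Path:/ping"))), // illustrative rule string
        th.WithLoadBalancerServices(th.WithService("bar",
            th.WithLBMethod("wrr"),
            th.WithServers(th.WithServer("http://127.0.0.1:8080")))), // illustrative URL
    )
    _ = conf // *config.Configuration ready to be fed to the server in tests
}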
+func WithServers(opts ...func(*config.Server)) func(*config.LoadBalancerService) { + return func(b *config.LoadBalancerService) { for _, opt := range opts { - s := &types.Route{} - name := opt(s) - f.Routes[name] = *s + server := config.Server{Weight: 1} + opt(&server) + b.Servers = append(b.Servers, server) } } } -// WithRoute is a helper to create a configuration -func WithRoute(name string, rule string) func(*types.Route) string { - return func(r *types.Route) string { - r.Rule = rule - return name - } -} - -// WithFrontEndAuth is a helper to create a configuration -func WithFrontEndAuth(auth *types.Auth) func(*types.Frontend) { - return func(fe *types.Frontend) { - fe.Auth = auth - } -} - -// WithLBSticky is a helper to create a configuration -func WithLBSticky(cookieName string) func(*types.Backend) { - return func(b *types.Backend) { - if b.LoadBalancer == nil { - b.LoadBalancer = &types.LoadBalancer{} +// WithServer is a helper to create a configuration. +func WithServer(url string, opts ...func(*config.Server)) func(*config.Server) { + return func(s *config.Server) { + for _, opt := range opts { + opt(s) + } + s.URL = url + } +} + +// WithLBMethod is a helper to create a configuration. +func WithLBMethod(method string) func(*config.LoadBalancerService) { + return func(b *config.LoadBalancerService) { + b.Method = method + } +} + +// WithStickiness is a helper to create a configuration. +func WithStickiness(cookieName string) func(*config.LoadBalancerService) { + return func(b *config.LoadBalancerService) { + b.Stickiness = &config.Stickiness{ + CookieName: cookieName, } - b.LoadBalancer.Stickiness = &types.Stickiness{CookieName: cookieName} } } diff --git a/tracing/carrier.go b/tracing/carrier.go new file mode 100644 index 000000000..57f54865d --- /dev/null +++ b/tracing/carrier.go @@ -0,0 +1,25 @@ +package tracing + +import "net/http" + +// HTTPHeadersCarrier custom implementation to fix duplicated headers +// It has been fixed in https://github.com/opentracing/opentracing-go/pull/191 +type HTTPHeadersCarrier http.Header + +// Set conforms to the TextMapWriter interface. +func (c HTTPHeadersCarrier) Set(key, val string) { + h := http.Header(c) + h.Set(key, val) +} + +// ForeachKey conforms to the TextMapReader interface. +func (c HTTPHeadersCarrier) ForeachKey(handler func(key, val string) error) error { + for k, vals := range c { + for _, v := range vals { + if err := handler(k, v); err != nil { + return err + } + } + } + return nil +} diff --git a/tracing/datadog/datadog.go b/tracing/datadog/datadog.go new file mode 100644 index 000000000..61a0df01b --- /dev/null +++ b/tracing/datadog/datadog.go @@ -0,0 +1,45 @@ +package datadog + +import ( + "io" + "strings" + + "github.com/containous/traefik/log" + "github.com/opentracing/opentracing-go" + ddtracer "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/opentracer" + datadog "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" +) + +// Name sets the name of this tracer +const Name = "datadog" + +// Config provides configuration settings for a datadog tracer +type Config struct { + LocalAgentHostPort string `description:"Set datadog-agent's host:port that the reporter will used. Defaults to localhost:8126" export:"false"` + GlobalTag string `description:"Key:Value tag to be set on all the spans." export:"true"` + Debug bool `description:"Enable DataDog debug." 
export:"true"` +} + +// Setup sets up the tracer +func (c *Config) Setup(serviceName string) (opentracing.Tracer, io.Closer, error) { + tag := strings.SplitN(c.GlobalTag, ":", 2) + + value := "" + if len(tag) == 2 { + value = tag[1] + } + + tracer := ddtracer.New( + datadog.WithAgentAddr(c.LocalAgentHostPort), + datadog.WithServiceName(serviceName), + datadog.WithGlobalTag(tag[0], value), + datadog.WithDebugMode(c.Debug), + ) + + // Without this, child spans are getting the NOOP tracer + opentracing.SetGlobalTracer(tracer) + + log.WithoutContext().Debug("DataDog tracer configured") + + return tracer, nil, nil +} diff --git a/tracing/jaeger/jaeger.go b/tracing/jaeger/jaeger.go new file mode 100644 index 000000000..1729936cf --- /dev/null +++ b/tracing/jaeger/jaeger.go @@ -0,0 +1,73 @@ +package jaeger + +import ( + "fmt" + "io" + + "github.com/containous/traefik/log" + "github.com/opentracing/opentracing-go" + jaegercfg "github.com/uber/jaeger-client-go/config" + "github.com/uber/jaeger-client-go/zipkin" + jaegermet "github.com/uber/jaeger-lib/metrics" +) + +// Name sets the name of this tracer +const Name = "jaeger" + +// Config provides configuration settings for a jaeger tracer +type Config struct { + SamplingServerURL string `description:"set the sampling server url." export:"false"` + SamplingType string `description:"set the sampling type." export:"true"` + SamplingParam float64 `description:"set the sampling parameter." export:"true"` + LocalAgentHostPort string `description:"set jaeger-agent's host:port that the reporter will used." export:"false"` + Gen128Bit bool `description:"generate 128 bit span IDs." export:"true"` + Propagation string `description:"which propgation format to use (jaeger/b3)." export:"true"` +} + +// Setup sets up the tracer +func (c *Config) Setup(componentName string) (opentracing.Tracer, io.Closer, error) { + jcfg := jaegercfg.Configuration{ + Sampler: &jaegercfg.SamplerConfig{ + SamplingServerURL: c.SamplingServerURL, + Type: c.SamplingType, + Param: c.SamplingParam, + }, + Reporter: &jaegercfg.ReporterConfig{ + LogSpans: true, + LocalAgentHostPort: c.LocalAgentHostPort, + }, + } + + jMetricsFactory := jaegermet.NullFactory + + opts := []jaegercfg.Option{ + jaegercfg.Logger(newJaegerLogger()), + jaegercfg.Metrics(jMetricsFactory), + jaegercfg.Gen128Bit(c.Gen128Bit), + } + + switch c.Propagation { + case "b3": + p := zipkin.NewZipkinB3HTTPHeaderPropagator() + opts = append(opts, + jaegercfg.Injector(opentracing.HTTPHeaders, p), + jaegercfg.Extractor(opentracing.HTTPHeaders, p), + ) + case "jaeger", "": + default: + return nil, nil, fmt.Errorf("unknown propagation format: %s", c.Propagation) + } + + // Initialize tracer with a logger and a metrics factory + closer, err := jcfg.InitGlobalTracer( + componentName, + opts..., + ) + if err != nil { + log.WithoutContext().Warnf("Could not initialize jaeger tracer: %s", err.Error()) + return nil, nil, err + } + log.WithoutContext().Debug("Jaeger tracer configured") + + return opentracing.GlobalTracer(), closer, nil +} diff --git a/tracing/jaeger/logger.go b/tracing/jaeger/logger.go new file mode 100644 index 000000000..e2c934d5c --- /dev/null +++ b/tracing/jaeger/logger.go @@ -0,0 +1,26 @@ +package jaeger + +import ( + "github.com/containous/traefik/log" + "github.com/sirupsen/logrus" +) + +// jaegerLogger is an implementation of the Logger interface that delegates to traefik log +type jaegerLogger struct { + logger logrus.FieldLogger +} + +func newJaegerLogger() *jaegerLogger { + return &jaegerLogger{ + logger: 
log.WithoutContext().WithField(log.TracingProviderName, "jaeger"),
+    }
+}
+
+func (l *jaegerLogger) Error(msg string) {
+    l.logger.Errorf("Tracing jaeger error: %s", msg)
+}
+
+// Infof logs a message at debug priority
+func (l *jaegerLogger) Infof(msg string, args ...interface{}) {
+    l.logger.Debugf(msg, args...)
+}
diff --git a/tracing/operation_name.go b/tracing/operation_name.go
new file mode 100644
index 000000000..2321eec8e
--- /dev/null
+++ b/tracing/operation_name.go
@@ -0,0 +1,65 @@
+package tracing
+
+import (
+    "crypto/sha256"
+    "fmt"
+    "strings"
+
+    "github.com/containous/traefik/log"
+)
+
+// TraceNameHashLength defines the number of characters to use from the head of the generated hash.
+const TraceNameHashLength = 8
+
+// OperationNameMaxLengthNumber defines the number of static characters in a Span Trace name:
+// 8 chars for hash + 2 chars for '_'.
+const OperationNameMaxLengthNumber = 10
+
+func generateOperationName(prefix string, parts []string, sep string, spanLimit int) string {
+    name := prefix + " " + strings.Join(parts, sep)
+
+    maxLength := OperationNameMaxLengthNumber + len(prefix) + 1
+
+    if spanLimit > 0 && len(name) > spanLimit {
+        if spanLimit < maxLength {
+            log.WithoutContext().Warnf("SpanNameLimit cannot be less than %d: falling back on %d", maxLength, maxLength+3)
+            spanLimit = maxLength + 3
+        }
+
+        limit := (spanLimit - maxLength) / 2
+
+        var fragments []string
+        for _, value := range parts {
+            fragments = append(fragments, truncateString(value, limit))
+        }
+        fragments = append(fragments, computeHash(name))
+
+        name = prefix + " " + strings.Join(fragments, sep)
+    }
+
+    return name
+}
+
+// truncateString reduces the length of the 'str' argument to 'num' - 3 and adds a '...' suffix to the tail.
+func truncateString(str string, num int) string {
+    text := str
+    if len(str) > num {
+        if num > 3 {
+            num -= 3
+        }
+        text = str[0:num] + "..."
+    }
+    return text
+}
+
+// computeHash returns the first TraceNameHashLength character of the sha256 hash for 'name' argument.
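A worked example of the name generation above, matching the "with prefix, with parts, with span limit" case in operation_name_test.go below: with prefix "foo", parts ["fff", "ooo", "ooo", "bbb", "aaa", "rrr"], separator "-" and spanLimit 20, the raw name "foo fff-ooo-ooo-bbb-aaa-rrr" is 27 characters and exceeds the limit. maxLength is 10 + len("foo") + 1 = 14, so limit = (20 - 14) / 2 = 3; each three-character part passes through truncateString unchanged, computeHash of the full name contributes the 8-character suffix, and the result is "foo fff-ooo-ooo-bbb-aaa-rrr-1a8e8ac1".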
+func computeHash(name string) string { + data := []byte(name) + hash := sha256.New() + if _, err := hash.Write(data); err != nil { + // Impossible case + log.WithoutContext().WithField("OperationName", name).Errorf("Failed to create Span name hash for %s: %v", name, err) + } + + return fmt.Sprintf("%x", hash.Sum(nil))[:TraceNameHashLength] +} diff --git a/tracing/operation_name_test.go b/tracing/operation_name_test.go new file mode 100644 index 000000000..dd9542c67 --- /dev/null +++ b/tracing/operation_name_test.go @@ -0,0 +1,135 @@ +package tracing + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_generateOperationName(t *testing.T) { + testCases := []struct { + desc string + prefix string + parts []string + sep string + spanLimit int + expected string + }{ + { + desc: "empty", + expected: " ", + }, + { + desc: "with prefix, without parts", + prefix: "foo", + parts: []string{}, + sep: "-", + spanLimit: 0, + expected: "foo ", + }, + { + desc: "with prefix, without parts, too small span limit", + prefix: "foo", + parts: []string{}, + sep: "-", + spanLimit: 1, + expected: "foo 6c2d2c76", + }, + { + desc: "with prefix, with parts", + prefix: "foo", + parts: []string{"fii", "fuu", "fee", "faa"}, + sep: "-", + spanLimit: 0, + expected: "foo fii-fuu-fee-faa", + }, + { + desc: "with prefix, with parts, with span limit", + prefix: "foo", + parts: []string{"fff", "ooo", "ooo", "bbb", "aaa", "rrr"}, + sep: "-", + spanLimit: 20, + expected: "foo fff-ooo-ooo-bbb-aaa-rrr-1a8e8ac1", + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + opName := generateOperationName(test.prefix, test.parts, test.sep, test.spanLimit) + assert.Equal(t, test.expected, opName) + }) + } +} + +func TestComputeHash(t *testing.T) { + testCases := []struct { + desc string + text string + expected string + }{ + { + desc: "hashing", + text: "some very long pice of text", + expected: "0258ea1c", + }, + { + desc: "short text less than limit 10", + text: "short", + expected: "f9b0078b", + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + actual := computeHash(test.text) + + assert.Equal(t, test.expected, actual) + }) + } +} + +func TestTruncateString(t *testing.T) { + testCases := []struct { + desc string + text string + limit int + expected string + }{ + { + desc: "short text less than limit 10", + text: "short", + limit: 10, + expected: "short", + }, + { + desc: "basic truncate with limit 10", + text: "some very long pice of text", + limit: 10, + expected: "some ve...", + }, + { + desc: "truncate long FQDN to 39 chars", + text: "some-service-100.slug.namespace.environment.domain.tld", + limit: 39, + expected: "some-service-100.slug.namespace.envi...", + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + actual := truncateString(test.text, test.limit) + + assert.Equal(t, test.expected, actual) + assert.True(t, len(actual) <= test.limit) + }) + } +} diff --git a/tracing/tracing.go b/tracing/tracing.go new file mode 100644 index 000000000..5f01e0467 --- /dev/null +++ b/tracing/tracing.go @@ -0,0 +1,187 @@ +package tracing + +import ( + "context" + "fmt" + "io" + "net/http" + + "github.com/containous/traefik/log" + "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go/ext" +) + +type contextKey int + +const ( + // SpanKindNoneEnum Span kind enum none. 
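To make the wiring of these backends concrete, here is a minimal standalone sketch of the jaeger backend introduced above (datadog and zipkin follow the same Config/Setup pattern); the sampling values, agent address, and service name are illustrative assumptions, not values required by this change:

    package main

    import (
        "log"

        "github.com/containous/traefik/tracing/jaeger"
    )

    func main() {
        // Illustrative values only: a local jaeger-agent with B3 propagation.
        cfg := &jaeger.Config{
            SamplingServerURL:  "http://localhost:5778/sampling",
            SamplingType:       "const",
            SamplingParam:      1.0,
            LocalAgentHostPort: "127.0.0.1:6831",
            Gen128Bit:          true,
            Propagation:        "b3",
        }

        tracer, closer, err := cfg.Setup("traefik")
        if err != nil {
            log.Fatalf("could not set up jaeger tracer: %v", err)
        }
        defer closer.Close()

        // Setup also installs the tracer globally (InitGlobalTracer), so
        // opentracing.GlobalTracer() returns this same instance.
        span := tracer.StartSpan("smoke-test")
        span.Finish()
    }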
+ SpanKindNoneEnum ext.SpanKindEnum = "none" + tracingKey contextKey = iota +) + +// WithTracing Adds Tracing into the context. +func WithTracing(ctx context.Context, tracing *Tracing) context.Context { + return context.WithValue(ctx, tracingKey, tracing) +} + +// FromContext Gets Tracing from context. +func FromContext(ctx context.Context) (*Tracing, error) { + if ctx == nil { + panic("nil context") + } + + tracer, ok := ctx.Value(tracingKey).(*Tracing) + if !ok { + return nil, fmt.Errorf("unable to find tracing in the context") + } + return tracer, nil +} + +// TrackingBackend is an abstraction for tracking backend (Jaeger, Zipkin, ...). +type TrackingBackend interface { + Setup(componentName string) (opentracing.Tracer, io.Closer, error) +} + +// Tracing middleware. +type Tracing struct { + ServiceName string `description:"Set the name for this service" export:"true"` + SpanNameLimit int `description:"Set the maximum character limit for Span names (default 0 = no limit)" export:"true"` + + tracer opentracing.Tracer + closer io.Closer +} + +// NewTracing Creates a Tracing. +func NewTracing(serviceName string, spanNameLimit int, trackingBackend TrackingBackend) (*Tracing, error) { + tracing := &Tracing{ + ServiceName: serviceName, + SpanNameLimit: spanNameLimit, + } + + var err error + tracing.tracer, tracing.closer, err = trackingBackend.Setup(serviceName) + if err != nil { + return nil, err + } + return tracing, nil +} + +// StartSpan delegates to opentracing.Tracer. +func (t *Tracing) StartSpan(operationName string, opts ...opentracing.StartSpanOption) opentracing.Span { + return t.tracer.StartSpan(operationName, opts...) +} + +// StartSpanf delegates to StartSpan. +func (t *Tracing) StartSpanf(r *http.Request, spanKind ext.SpanKindEnum, opPrefix string, opParts []string, separator string, opts ...opentracing.StartSpanOption) (opentracing.Span, *http.Request, func()) { + operationName := generateOperationName(opPrefix, opParts, separator, t.SpanNameLimit) + + return StartSpan(r, operationName, spanKind, opts...) +} + +// Inject delegates to opentracing.Tracer. +func (t *Tracing) Inject(sm opentracing.SpanContext, format interface{}, carrier interface{}) error { + return t.tracer.Inject(sm, format, carrier) +} + +// Extract delegates to opentracing.Tracer. +func (t *Tracing) Extract(format interface{}, carrier interface{}) (opentracing.SpanContext, error) { + return t.tracer.Extract(format, carrier) +} + +// IsEnabled determines if tracing was successfully activated. +func (t *Tracing) IsEnabled() bool { + if t == nil || t.tracer == nil { + return false + } + return true +} + +// Close tracer +func (t *Tracing) Close() { + if t.closer != nil { + err := t.closer.Close() + if err != nil { + log.WithoutContext().Warn(err) + } + } +} + +// LogRequest used to create span tags from the request. +func LogRequest(span opentracing.Span, r *http.Request) { + if span != nil && r != nil { + ext.HTTPMethod.Set(span, r.Method) + ext.HTTPUrl.Set(span, r.URL.String()) + span.SetTag("http.host", r.Host) + } +} + +// LogResponseCode used to log response code in span. +func LogResponseCode(span opentracing.Span, code int) { + if span != nil { + ext.HTTPStatusCode.Set(span, uint16(code)) + if code >= 400 { + ext.Error.Set(span, true) + } + } +} + +// GetSpan used to retrieve span from request context. +func GetSpan(r *http.Request) opentracing.Span { + return opentracing.SpanFromContext(r.Context()) +} + +// InjectRequestHeaders used to inject OpenTracing headers into the request. 
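The request-level helpers defined just below (InjectRequestHeaders, LogEventf, SetErrorWithEvent) together with StartSpanf, LogRequest and LogResponseCode above are meant to be composed around a handler. A hypothetical middleware-style sketch; traceForward, codeRecorder and the "my-backend" operation part are illustrative names, not part of this change:

    package example

    import (
        "net/http"

        "github.com/containous/traefik/tracing"
        "github.com/opentracing/opentracing-go/ext"
    )

    // codeRecorder captures the status code written by the wrapped handler.
    type codeRecorder struct {
        http.ResponseWriter
        code int
    }

    func (c *codeRecorder) WriteHeader(code int) {
        c.code = code
        c.ResponseWriter.WriteHeader(code)
    }

    // traceForward wraps next in a client span and records the outcome.
    func traceForward(t *tracing.Tracing, rw http.ResponseWriter, r *http.Request, next http.Handler) {
        // The span name becomes "forward my-backend", subject to SpanNameLimit.
        span, r, finish := t.StartSpanf(r, ext.SpanKindRPCClientEnum, "forward", []string{"my-backend"}, " ")
        defer finish()

        tracing.LogRequest(span, r)
        // Propagate the span context to the upstream service via HTTP headers.
        tracing.InjectRequestHeaders(r)

        recorder := &codeRecorder{ResponseWriter: rw, code: http.StatusOK}
        next.ServeHTTP(recorder, r)

        tracing.LogResponseCode(span, recorder.code)
        if recorder.code >= http.StatusInternalServerError {
            tracing.SetErrorWithEvent(r, "upstream returned %d", recorder.code)
        }
    }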
+func InjectRequestHeaders(r *http.Request) {
+	if span := GetSpan(r); span != nil {
+		err := opentracing.GlobalTracer().Inject(
+			span.Context(),
+			opentracing.HTTPHeaders,
+			opentracing.HTTPHeadersCarrier(r.Header))
+		if err != nil {
+			log.FromContext(r.Context()).Error(err)
+		}
+	}
+}
+
+// LogEventf logs an event to the span in the request context.
+func LogEventf(r *http.Request, format string, args ...interface{}) {
+	if span := GetSpan(r); span != nil {
+		span.LogKV("event", fmt.Sprintf(format, args...))
+	}
+}
+
+// StartSpan starts a new span from the one in the request context
+func StartSpan(r *http.Request, operationName string, spanKind ext.SpanKindEnum, opts ...opentracing.StartSpanOption) (opentracing.Span, *http.Request, func()) {
+	span, ctx := opentracing.StartSpanFromContext(r.Context(), operationName, opts...)
+
+	switch spanKind {
+	case ext.SpanKindRPCClientEnum:
+		ext.SpanKindRPCClient.Set(span)
+	case ext.SpanKindRPCServerEnum:
+		ext.SpanKindRPCServer.Set(span)
+	case ext.SpanKindProducerEnum:
+		ext.SpanKindProducer.Set(span)
+	case ext.SpanKindConsumerEnum:
+		ext.SpanKindConsumer.Set(span)
+	default:
+		// noop
+	}
+
+	r = r.WithContext(ctx)
+	return span, r, func() {
+		span.Finish()
+	}
+}
+
+// SetError flags the span associated with this request as in error.
+func SetError(r *http.Request) {
+	if span := GetSpan(r); span != nil {
+		ext.Error.Set(span, true)
+	}
+}
+
+// SetErrorWithEvent flags the span associated with this request as in error and logs an event.
+func SetErrorWithEvent(r *http.Request, format string, args ...interface{}) {
+	SetError(r)
+	LogEventf(r, format, args...)
+}
diff --git a/tracing/zipkin/zipkin.go b/tracing/zipkin/zipkin.go
new file mode 100644
index 000000000..7b0f8a717
--- /dev/null
+++ b/tracing/zipkin/zipkin.go
@@ -0,0 +1,47 @@
+package zipkin
+
+import (
+	"io"
+
+	"github.com/containous/traefik/log"
+	"github.com/opentracing/opentracing-go"
+	zipkin "github.com/openzipkin/zipkin-go-opentracing"
+)
+
+// Name sets the name of this tracer.
+const Name = "zipkin"
+
+// Config provides configuration settings for a zipkin tracer.
+type Config struct {
+	HTTPEndpoint string `description:"HTTP Endpoint to report traces to." export:"false"`
+	SameSpan     bool   `description:"Use ZipKin SameSpan RPC style traces." export:"true"`
+	ID128Bit     bool   `description:"Use ZipKin 128 bit root span IDs." export:"true"`
+	Debug        bool   `description:"Enable Zipkin debug." export:"true"`
+}
+
+// Setup sets up the tracer
+func (c *Config) Setup(serviceName string) (opentracing.Tracer, io.Closer, error) {
+	collector, err := zipkin.NewHTTPCollector(c.HTTPEndpoint)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	recorder := zipkin.NewRecorder(collector, c.Debug, "0.0.0.0:0", serviceName)
+
+	tracer, err := zipkin.NewTracer(
+		recorder,
+		zipkin.ClientServerSameSpan(c.SameSpan),
+		zipkin.TraceID128Bit(c.ID128Bit),
+		zipkin.DebugMode(c.Debug),
+	)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// Without this, child spans are getting the NOOP tracer
+	opentracing.SetGlobalTracer(tracer)
+
+	log.WithoutContext().Debug("Zipkin tracer configured")
+
+	return tracer, collector, nil
+}
diff --git a/types/constraints.go b/types/constraints.go
new file mode 100644
index 000000000..9f979b0f9
--- /dev/null
+++ b/types/constraints.go
@@ -0,0 +1,122 @@
+package types
+
+import (
+	"encoding"
+	"errors"
+	"fmt"
+	"strings"
+
+	"github.com/ryanuber/go-glob"
+)
+
+// Constraint holds a parsed constraint expression.
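Putting the pieces together, any of the backend Configs satisfies TrackingBackend, so it can be handed to NewTracing and the resulting Tracing carried in a context. A minimal sketch using the zipkin backend above, assuming a local collector and illustrative service name and span-name limit:

    package main

    import (
        "context"
        "log"

        "github.com/containous/traefik/tracing"
        "github.com/containous/traefik/tracing/zipkin"
    )

    func main() {
        // Illustrative values: report spans to a local Zipkin collector.
        backend := &zipkin.Config{
            HTTPEndpoint: "http://localhost:9411/api/v1/spans",
            SameSpan:     true,
            ID128Bit:     true,
        }

        // *zipkin.Config satisfies tracing.TrackingBackend via its Setup method.
        t, err := tracing.NewTracing("traefik", 150, backend)
        if err != nil {
            log.Fatalf("could not create tracing: %v", err)
        }
        defer t.Close()

        // Hand the Tracing around through a context.
        ctx := tracing.WithTracing(context.Background(), t)

        if fromCtx, err := tracing.FromContext(ctx); err == nil && fromCtx.IsEnabled() {
            span := fromCtx.StartSpan("bootstrap")
            span.Finish()
        }
    }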
+type Constraint struct { + Key string `export:"true"` + // MustMatch is true if operator is "==" or false if operator is "!=" + MustMatch bool `export:"true"` + // TODO: support regex + Regex string `export:"true"` +} + +// NewConstraint receives a string and return a *Constraint, after checking syntax and parsing the constraint expression. +func NewConstraint(exp string) (*Constraint, error) { + sep := "" + constraint := &Constraint{} + + if strings.Contains(exp, "==") { + sep = "==" + constraint.MustMatch = true + } else if strings.Contains(exp, "!=") { + sep = "!=" + constraint.MustMatch = false + } else { + return nil, errors.New("constraint expression missing valid operator: '==' or '!='") + } + + kv := strings.SplitN(exp, sep, 2) + if len(kv) == 2 { + // At the moment, it only supports tags + if kv[0] != "tag" { + return nil, errors.New("constraint must be tag-based. Syntax: tag==us-*") + } + + constraint.Key = kv[0] + constraint.Regex = kv[1] + return constraint, nil + } + + return nil, fmt.Errorf("incorrect constraint expression: %s", exp) +} + +func (c *Constraint) String() string { + if c.MustMatch { + return c.Key + "==" + c.Regex + } + return c.Key + "!=" + c.Regex +} + +var _ encoding.TextUnmarshaler = (*Constraint)(nil) + +// UnmarshalText defines how unmarshal in TOML parsing +func (c *Constraint) UnmarshalText(text []byte) error { + constraint, err := NewConstraint(string(text)) + if err != nil { + return err + } + c.Key = constraint.Key + c.MustMatch = constraint.MustMatch + c.Regex = constraint.Regex + return nil +} + +var _ encoding.TextMarshaler = (*Constraint)(nil) + +// MarshalText encodes the receiver into UTF-8-encoded text and returns the result. +func (c *Constraint) MarshalText() (text []byte, err error) { + return []byte(c.String()), nil +} + +// MatchConstraintWithAtLeastOneTag tests a constraint for one single service. +func (c *Constraint) MatchConstraintWithAtLeastOneTag(tags []string) bool { + for _, tag := range tags { + if glob.Glob(c.Regex, tag) { + return true + } + } + return false +} + +// Set []*Constraint. +func (cs *Constraints) Set(str string) error { + exps := strings.Split(str, ",") + if len(exps) == 0 { + return fmt.Errorf("bad Constraint format: %s", str) + } + for _, exp := range exps { + constraint, err := NewConstraint(exp) + if err != nil { + return err + } + *cs = append(*cs, constraint) + } + return nil +} + +// Constraints holds a Constraint parser. +type Constraints []*Constraint + +// Get []*Constraint +func (cs *Constraints) Get() interface{} { return []*Constraint(*cs) } + +// String returns []*Constraint in string. +func (cs *Constraints) String() string { return fmt.Sprintf("%+v", *cs) } + +// SetValue sets []*Constraint into the parser. +func (cs *Constraints) SetValue(val interface{}) { + *cs = val.(Constraints) +} + +// Type exports the Constraints type as a string. +func (cs *Constraints) Type() string { + return "constraint" +} diff --git a/types/domains.go b/types/domains.go index 2cace3f64..55d85c25e 100644 --- a/types/domains.go +++ b/types/domains.go @@ -5,13 +5,13 @@ import ( "strings" ) -// Domain holds a domain name with SANs +// Domain holds a domain name with SANs. type Domain struct { Main string SANs []string } -// ToStrArray convert a domain into an array of strings +// ToStrArray convert a domain into an array of strings. func (d *Domain) ToStrArray() []string { var domains []string if len(d.Main) > 0 { @@ -20,7 +20,7 @@ func (d *Domain) ToStrArray() []string { return append(domains, d.SANs...) 
} -// Set sets a domains from an array of strings +// Set sets a domains from an array of strings. func (d *Domain) Set(domains []string) { if len(domains) > 0 { d.Main = domains[0] @@ -28,7 +28,7 @@ func (d *Domain) Set(domains []string) { } } -// Domains parse []Domain +// Domains parse []Domain. type Domains []Domain // Set []Domain @@ -55,10 +55,10 @@ func (ds *Domains) Set(str string) error { return nil } -// Get []Domain +// Get []Domain. func (ds *Domains) Get() interface{} { return []Domain(*ds) } -// String returns []Domain in string +// String returns []Domain in string. func (ds *Domains) String() string { return fmt.Sprintf("%+v", *ds) } // SetValue sets []Domain into the parser @@ -66,7 +66,7 @@ func (ds *Domains) SetValue(val interface{}) { *ds = val.([]Domain) } -// MatchDomain return true if a domain match the cert domain +// MatchDomain returns true if a domain match the cert domain. func MatchDomain(domain string, certDomain string) bool { if domain == certDomain { return true @@ -86,3 +86,8 @@ func MatchDomain(domain string, certDomain string) bool { } return false } + +// CanonicalDomain returns a lower case domain with trim space. +func CanonicalDomain(domain string) string { + return strings.ToLower(strings.TrimSpace(domain)) +} diff --git a/types/http_code_range.go b/types/http_code_range.go new file mode 100644 index 000000000..95defbf49 --- /dev/null +++ b/types/http_code_range.go @@ -0,0 +1,44 @@ +package types + +import ( + "strconv" + "strings" +) + +// HTTPCodeRanges holds HTTP code ranges +type HTTPCodeRanges [][2]int + +// NewHTTPCodeRanges creates HTTPCodeRanges from a given []string. +// Break out the http status code ranges into a low int and high int +// for ease of use at runtime +func NewHTTPCodeRanges(strBlocks []string) (HTTPCodeRanges, error) { + var blocks HTTPCodeRanges + for _, block := range strBlocks { + codes := strings.Split(block, "-") + // if only a single HTTP code was configured, assume the best and create the correct configuration on the user's behalf + if len(codes) == 1 { + codes = append(codes, codes[0]) + } + lowCode, err := strconv.Atoi(codes[0]) + if err != nil { + return nil, err + } + highCode, err := strconv.Atoi(codes[1]) + if err != nil { + return nil, err + } + blocks = append(blocks, [2]int{lowCode, highCode}) + } + return blocks, nil +} + +// Contains tests whether the passed status code is within +// one of its HTTP code ranges. +func (h HTTPCodeRanges) Contains(statusCode int) bool { + for _, block := range h { + if statusCode >= block[0] && statusCode <= block[1] { + return true + } + } + return false +} diff --git a/types/logs.go b/types/logs.go index 118de9c5f..d43f2ea4c 100644 --- a/types/logs.go +++ b/types/logs.go @@ -18,6 +18,7 @@ const ( // TraefikLog holds the configuration settings for the traefik logger. type TraefikLog struct { + LogLevel string `description:"Log level set to traefik logs." export:"true"` FilePath string `json:"file,omitempty" description:"Traefik log file path. 
Stdout is used when omitted or empty"` Format string `json:"format,omitempty" description:"Traefik log format: json | common"` } diff --git a/types/metrics.go b/types/metrics.go new file mode 100644 index 000000000..38a660825 --- /dev/null +++ b/types/metrics.go @@ -0,0 +1,82 @@ +package types + +import ( + "fmt" + "strconv" + "strings" +) + +// Metrics provides options to expose and send Traefik metrics to different third party monitoring systems +type Metrics struct { + Prometheus *Prometheus `description:"Prometheus metrics exporter type" export:"true"` + Datadog *Datadog `description:"DataDog metrics exporter type" export:"true"` + StatsD *Statsd `description:"StatsD metrics exporter type" export:"true"` + InfluxDB *InfluxDB `description:"InfluxDB metrics exporter type"` +} + +// Prometheus can contain specific configuration used by the Prometheus Metrics exporter +type Prometheus struct { + Buckets Buckets `description:"Buckets for latency metrics" export:"true"` + EntryPoint string `description:"EntryPoint" export:"true"` + Middlewares []string `description:"Middlewares" export:"true"` +} + +// Datadog contains address and metrics pushing interval configuration +type Datadog struct { + Address string `description:"DataDog's address"` + PushInterval string `description:"DataDog push interval" export:"true"` +} + +// Statsd contains address and metrics pushing interval configuration +type Statsd struct { + Address string `description:"StatsD address"` + PushInterval string `description:"StatsD push interval" export:"true"` +} + +// InfluxDB contains address, login and metrics pushing interval configuration +type InfluxDB struct { + Address string `description:"InfluxDB address"` + Protocol string `description:"InfluxDB address protocol (udp or http)"` + PushInterval string `description:"InfluxDB push interval" export:"true"` + Database string `description:"InfluxDB database used when protocol is http" export:"true"` + RetentionPolicy string `description:"InfluxDB retention policy used when protocol is http" export:"true"` + Username string `description:"InfluxDB username (only with http)" export:"true"` + Password string `description:"InfluxDB password (only with http)" export:"true"` +} + +// Statistics provides options for monitoring request and response stats +type Statistics struct { + RecentErrors int `description:"Number of recent errors logged" export:"true"` +} + +// Buckets holds Prometheus Buckets +type Buckets []float64 + +// Set adds strings elem into the the parser +// it splits str on "," and ";" and apply ParseFloat to string +func (b *Buckets) Set(str string) error { + fargs := func(c rune) bool { + return c == ',' || c == ';' + } + // get function + slice := strings.FieldsFunc(str, fargs) + for _, bucket := range slice { + bu, err := strconv.ParseFloat(bucket, 64) + if err != nil { + return err + } + *b = append(*b, bu) + } + return nil +} + +// Get []float64 +func (b *Buckets) Get() interface{} { return *b } + +// String return slice in a string +func (b *Buckets) String() string { return fmt.Sprintf("%v", *b) } + +// SetValue sets []float64 into the parser +func (b *Buckets) SetValue(val interface{}) { + *b = val.(Buckets) +} diff --git a/types/route_appender.go b/types/route_appender.go new file mode 100644 index 000000000..01e24a3bf --- /dev/null +++ b/types/route_appender.go @@ -0,0 +1,10 @@ +package types + +import ( + "github.com/containous/mux" +) + +// RouteAppender appends routes on a router (/api, /ping ...) 
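The value types added in this package are mostly driven through flag/TOML parsing, but their behaviour is easiest to see standalone. A small sketch with illustrative inputs (the tag pattern, code ranges and bucket boundaries are assumptions, not defaults):

    package main

    import (
        "fmt"
        "log"

        "github.com/containous/traefik/types"
    )

    func main() {
        // Constraint: keep only backends carrying a tag that matches "us-*".
        c, err := types.NewConstraint("tag==us-*")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(c.MatchConstraintWithAtLeastOneTag([]string{"us-east-1", "internal"})) // true
        fmt.Println(c.MatchConstraintWithAtLeastOneTag([]string{"eu-west-3"}))             // false

        // HTTPCodeRanges: "500-599" and the single code "418" both parse;
        // the single code becomes the range [418, 418].
        ranges, err := types.NewHTTPCodeRanges([]string{"500-599", "418"})
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(ranges.Contains(503), ranges.Contains(404)) // true false

        // Buckets.Set accepts ',' or ';' separated float values.
        var b types.Buckets
        if err := b.Set("0.1,0.3;1.2,5.0"); err != nil {
            log.Fatal(err)
        }
        fmt.Println(b) // [0.1 0.3 1.2 5]
    }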
+type RouteAppender interface { + Append(systemRouter *mux.Router) +} diff --git a/vendor/code.cloudfoundry.org/clock/LICENSE b/vendor/code.cloudfoundry.org/clock/LICENSE deleted file mode 100644 index f49a4e16e..000000000 --- a/vendor/code.cloudfoundry.org/clock/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. 
- - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. \ No newline at end of file diff --git a/vendor/code.cloudfoundry.org/clock/NOTICE b/vendor/code.cloudfoundry.org/clock/NOTICE deleted file mode 100644 index 29c0e5ff0..000000000 --- a/vendor/code.cloudfoundry.org/clock/NOTICE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (c) 2015-Present CloudFoundry.org Foundation, Inc. All Rights Reserved. - -This project contains software that is Copyright (c) 2015 Pivotal Software, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -This project may include a number of subcomponents with separate -copyright notices and license terms. Your use of these subcomponents -is subject to the terms and conditions of each subcomponent's license, -as noted in the LICENSE file. diff --git a/vendor/code.cloudfoundry.org/clock/clock.go b/vendor/code.cloudfoundry.org/clock/clock.go deleted file mode 100644 index 6b091d99a..000000000 --- a/vendor/code.cloudfoundry.org/clock/clock.go +++ /dev/null @@ -1,53 +0,0 @@ -package clock - -import "time" - -type Clock interface { - Now() time.Time - Sleep(d time.Duration) - Since(t time.Time) time.Duration - // After waits for the duration to elapse and then sends the current time - // on the returned channel. - // It is equivalent to clock.NewTimer(d).C. - // The underlying Timer is not recovered by the garbage collector - // until the timer fires. If efficiency is a concern, use clock.NewTimer - // instead and call Timer.Stop if the timer is no longer needed. 
- After(d time.Duration) <-chan time.Time - - NewTimer(d time.Duration) Timer - NewTicker(d time.Duration) Ticker -} - -type realClock struct{} - -func NewClock() Clock { - return &realClock{} -} - -func (clock *realClock) Now() time.Time { - return time.Now() -} - -func (clock *realClock) Since(t time.Time) time.Duration { - return time.Now().Sub(t) -} - -func (clock *realClock) Sleep(d time.Duration) { - <-clock.NewTimer(d).C() -} - -func (clock *realClock) After(d time.Duration) <-chan time.Time { - return clock.NewTimer(d).C() -} - -func (clock *realClock) NewTimer(d time.Duration) Timer { - return &realTimer{ - t: time.NewTimer(d), - } -} - -func (clock *realClock) NewTicker(d time.Duration) Ticker { - return &realTicker{ - t: time.NewTicker(d), - } -} diff --git a/vendor/code.cloudfoundry.org/clock/package.go b/vendor/code.cloudfoundry.org/clock/package.go deleted file mode 100644 index 349f67c82..000000000 --- a/vendor/code.cloudfoundry.org/clock/package.go +++ /dev/null @@ -1 +0,0 @@ -package clock // import "code.cloudfoundry.org/clock" diff --git a/vendor/code.cloudfoundry.org/clock/ticker.go b/vendor/code.cloudfoundry.org/clock/ticker.go deleted file mode 100644 index f25129e1c..000000000 --- a/vendor/code.cloudfoundry.org/clock/ticker.go +++ /dev/null @@ -1,20 +0,0 @@ -package clock - -import "time" - -type Ticker interface { - C() <-chan time.Time - Stop() -} - -type realTicker struct { - t *time.Ticker -} - -func (t *realTicker) C() <-chan time.Time { - return t.t.C -} - -func (t *realTicker) Stop() { - t.t.Stop() -} diff --git a/vendor/code.cloudfoundry.org/clock/timer.go b/vendor/code.cloudfoundry.org/clock/timer.go deleted file mode 100644 index cf8c22125..000000000 --- a/vendor/code.cloudfoundry.org/clock/timer.go +++ /dev/null @@ -1,25 +0,0 @@ -package clock - -import "time" - -type Timer interface { - C() <-chan time.Time - Reset(d time.Duration) bool - Stop() bool -} - -type realTimer struct { - t *time.Timer -} - -func (t *realTimer) C() <-chan time.Time { - return t.t.C -} - -func (t *realTimer) Reset(d time.Duration) bool { - return t.t.Reset(d) -} - -func (t *realTimer) Stop() bool { - return t.t.Stop() -} diff --git a/vendor/github.com/Microsoft/ApplicationInsights-Go/LICENSE b/vendor/github.com/Microsoft/ApplicationInsights-Go/LICENSE deleted file mode 100644 index 01d022c22..000000000 --- a/vendor/github.com/Microsoft/ApplicationInsights-Go/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015-2017 Microsoft - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
- diff --git a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/bond.go b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/bond.go deleted file mode 100644 index 1af6f5949..000000000 --- a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/bond.go +++ /dev/null @@ -1,122 +0,0 @@ -package appinsights - -type Domain interface { -} - -type domain struct { - Ver int `json:"ver"` - Properties map[string]string `json:"properties"` -} - -type data struct { - BaseType string `json:"baseType"` - BaseData Domain `json:"baseData"` -} - -type envelope struct { - Name string `json:"name"` - Time string `json:"time"` - IKey string `json:"iKey"` - Tags map[string]string `json:"tags"` - Data *data `json:"data"` -} - -type DataPointType int - -const ( - Measurement DataPointType = iota - Aggregation -) - -type DataPoint struct { - Name string `json:"name"` - Kind DataPointType `json:"kind"` - Value float32 `json:"value"` - Count int `json:"count"` - min float32 `json:"min"` - max float32 `json:"max"` - stdDev float32 `json:"stdDev"` -} - -type metricData struct { - domain - Metrics []*DataPoint `json:"metrics"` -} - -type eventData struct { - domain - Name string `json:"name"` - Measurements map[string]float32 `json:"measurements"` -} - -type SeverityLevel int - -const ( - Verbose SeverityLevel = iota - Information - Warning - Error - Critical -) - -type messageData struct { - domain - Message string `json:"message"` - SeverityLevel SeverityLevel `json:"severityLevel"` -} - -type requestData struct { - domain - Id string `json:"id"` - Name string `json:"name"` - StartTime string `json:"startTime"` // yyyy-mm-ddThh:mm:ss.fffffff-hh:mm - Duration string `json:"duration"` // d:hh:mm:ss.fffffff - ResponseCode string `json:"responseCode"` - Success bool `json:"success"` - HttpMethod string `json:"httpMethod"` - Url string `json:"url"` - Measurements map[string]float32 `json:"measurements"` -} - -type ContextTagKeys string - -const ( - ApplicationVersion ContextTagKeys = "ai.application.ver" - ApplicationBuild = "ai.application.build" - CloudRole = "ai.cloud.role" - CloudRoleInstance = "ai.cloud.roleInstance" - DeviceId = "ai.device.id" - DeviceIp = "ai.device.ip" - DeviceLanguage = "ai.device.language" - DeviceLocale = "ai.device.locale" - DeviceModel = "ai.device.model" - DeviceNetwork = "ai.device.network" - DeviceOEMName = "ai.device.oemName" - DeviceOS = "ai.device.os" - DeviceOSVersion = "ai.device.osVersion" - DeviceRoleInstance = "ai.device.roleInstance" - DeviceRoleName = "ai.device.roleName" - DeviceScreenResolution = "ai.device.screenResolution" - DeviceType = "ai.device.type" - DeviceMachineName = "ai.device.machineName" - LocationIp = "ai.location.ip" - OperationCorrelationVector = "ai.operation.correlationVector" - OperationId = "ai.operation.id" - OperationName = "ai.operation.name" - OperationParentId = "ai.operation.parentId" - OperationRootId = "ai.operation.rootId" - OperationSyntheticSource = "ai.operation.syntheticSource" - OperationIsSynthetic = "ai.operation.isSynthetic" - SessionId = "ai.session.id" - SessionIsFirst = "ai.session.isFirst" - SessionIsNew = "ai.session.isNew" - UserAccountAcquisitionDate = "ai.user.accountAcquisitionDate" - UserAccountId = "ai.user.accountId" - UserAgent = "ai.user.userAgent" - UserAuthUserId = "ai.user.authUserId" - UserId = "ai.user.id" - UserStoreRegion = "ai.user.storeRegion" - SampleRate = "ai.sample.sampleRate" - InternalSdkVersion = "ai.internal.sdkVersion" - InternalAgentVersion = "ai.internal.agentVersion" -) 
diff --git a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/client.go b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/client.go deleted file mode 100644 index c7d56161e..000000000 --- a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/client.go +++ /dev/null @@ -1,132 +0,0 @@ -package appinsights - -import "time" - -type TelemetryClient interface { - Context() TelemetryContext - InstrumentationKey() string - Channel() TelemetryChannel - IsEnabled() bool - SetIsEnabled(bool) - Track(Telemetry) - TrackEvent(string) - TrackEventTelemetry(*EventTelemetry) - TrackMetric(string, float32) - TrackMetricTelemetry(*MetricTelemetry) - TrackTrace(string) - TrackTraceTelemetry(*TraceTelemetry) - TrackRequest(string, string, string, time.Time, time.Duration, string, bool) - TrackRequestTelemetry(*RequestTelemetry) -} - -type telemetryClient struct { - TelemetryConfiguration *TelemetryConfiguration - channel TelemetryChannel - context TelemetryContext - isEnabled bool -} - -func NewTelemetryClient(iKey string) TelemetryClient { - return NewTelemetryClientFromConfig(NewTelemetryConfiguration(iKey)) -} - -func NewTelemetryClientFromConfig(config *TelemetryConfiguration) TelemetryClient { - channel := NewInMemoryChannel(config) - context := NewClientTelemetryContext() - return &telemetryClient{ - TelemetryConfiguration: config, - channel: channel, - context: context, - isEnabled: true, - } -} - -func (tc *telemetryClient) Context() TelemetryContext { - return tc.context -} - -func (tc *telemetryClient) Channel() TelemetryChannel { - return tc.channel -} - -func (tc *telemetryClient) InstrumentationKey() string { - return tc.TelemetryConfiguration.InstrumentationKey -} - -func (tc *telemetryClient) IsEnabled() bool { - return tc.isEnabled -} - -func (tc *telemetryClient) SetIsEnabled(isEnabled bool) { - tc.isEnabled = isEnabled -} - -func (tc *telemetryClient) Track(item Telemetry) { - if tc.isEnabled { - iKey := tc.context.InstrumentationKey() - if len(iKey) == 0 { - iKey = tc.TelemetryConfiguration.InstrumentationKey - } - - itemContext := item.Context().(*telemetryContext) - itemContext.iKey = iKey - - clientContext := tc.context.(*telemetryContext) - - for tagkey, tagval := range clientContext.tags { - if itemContext.tags[tagkey] == "" { - itemContext.tags[tagkey] = tagval - } - } - - tc.channel.Send(item) - } -} - -func (tc *telemetryClient) TrackEvent(name string) { - item := NewEventTelemetry(name) - tc.TrackEventTelemetry(item) -} - -func (tc *telemetryClient) TrackEventTelemetry(event *EventTelemetry) { - var item Telemetry - item = event - - tc.Track(item) -} - -func (tc *telemetryClient) TrackMetric(name string, value float32) { - item := NewMetricTelemetry(name, value) - tc.TrackMetricTelemetry(item) -} - -func (tc *telemetryClient) TrackMetricTelemetry(metric *MetricTelemetry) { - var item Telemetry - item = metric - - tc.Track(item) -} - -func (tc *telemetryClient) TrackTrace(message string) { - item := NewTraceTelemetry(message, Information) - tc.TrackTraceTelemetry(item) -} - -func (tc *telemetryClient) TrackTraceTelemetry(trace *TraceTelemetry) { - var item Telemetry - item = trace - - tc.Track(item) -} - -func (tc *telemetryClient) TrackRequest(name, method, url string, timestamp time.Time, duration time.Duration, responseCode string, success bool) { - item := NewRequestTelemetry(name, method, url, timestamp, duration, responseCode, success) - tc.TrackRequestTelemetry(item) -} - -func (tc *telemetryClient) TrackRequestTelemetry(request 
*RequestTelemetry) { - var item Telemetry - item = request - - tc.Track(item) -} diff --git a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/clock.go b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/clock.go deleted file mode 100644 index 1178b9eaa..000000000 --- a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/clock.go +++ /dev/null @@ -1,11 +0,0 @@ -package appinsights - -// We need to mock out the clock for tests; we'll use this to do it. - -import "code.cloudfoundry.org/clock" - -var currentClock clock.Clock - -func init() { - currentClock = clock.NewClock() -} diff --git a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/concurrentrandom.go b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/concurrentrandom.go deleted file mode 100644 index 552d498fd..000000000 --- a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/concurrentrandom.go +++ /dev/null @@ -1,45 +0,0 @@ -package appinsights - -import ( - "encoding/base64" - "math/rand" - "sync/atomic" - "time" - "unsafe" -) - -type concurrentRandom struct { - channel chan string - random *rand.Rand -} - -var randomGenerator *concurrentRandom - -func newConcurrentRandom() *concurrentRandom { - source := rand.NewSource(time.Now().UnixNano()) - return &concurrentRandom{ - channel: make(chan string, 4), - random: rand.New(source), - } -} - -func (generator *concurrentRandom) run() { - buf := make([]byte, 8) - for { - generator.random.Read(buf) - generator.channel <- base64.StdEncoding.EncodeToString(buf) - } -} - -func randomId() string { - if randomGenerator == nil { - r := newConcurrentRandom() - if atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(&randomGenerator)), unsafe.Pointer(nil), unsafe.Pointer(r)) { - go r.run() - } else { - close(r.channel) - } - } - - return <-randomGenerator.channel -} diff --git a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/configuration.go b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/configuration.go deleted file mode 100644 index 37c602ed6..000000000 --- a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/configuration.go +++ /dev/null @@ -1,19 +0,0 @@ -package appinsights - -import "time" - -type TelemetryConfiguration struct { - InstrumentationKey string - EndpointUrl string - MaxBatchSize int - MaxBatchInterval time.Duration -} - -func NewTelemetryConfiguration(instrumentationKey string) *TelemetryConfiguration { - return &TelemetryConfiguration{ - InstrumentationKey: instrumentationKey, - EndpointUrl: "https://dc.services.visualstudio.com/v2/track", - MaxBatchSize: 1024, - MaxBatchInterval: time.Duration(10) * time.Second, - } -} diff --git a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/datacontracts.go b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/datacontracts.go deleted file mode 100644 index 136a86e48..000000000 --- a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/datacontracts.go +++ /dev/null @@ -1,228 +0,0 @@ -package appinsights - -import ( - "fmt" - "time" -) - -type Telemetry interface { - Timestamp() time.Time - Context() TelemetryContext - baseTypeName() string - baseData() Domain - SetProperty(string, string) -} - -type BaseTelemetry struct { - timestamp time.Time - context TelemetryContext -} - -type TraceTelemetry struct { - BaseTelemetry - data *messageData -} - -func NewTraceTelemetry(message string, severityLevel SeverityLevel) *TraceTelemetry { - now := time.Now() - data := 
&messageData{ - Message: message, - SeverityLevel: severityLevel, - } - - data.Ver = 2 - - item := &TraceTelemetry{ - data: data, - } - - item.timestamp = now - item.context = NewItemTelemetryContext() - - return item -} - -func (item *TraceTelemetry) Timestamp() time.Time { - return item.timestamp -} - -func (item *TraceTelemetry) Context() TelemetryContext { - return item.context -} - -func (item *TraceTelemetry) baseTypeName() string { - return "Message" -} - -func (item *TraceTelemetry) baseData() Domain { - return item.data -} - -func (item *TraceTelemetry) SetProperty(key, value string) { - if item.data.Properties == nil { - item.data.Properties = make(map[string]string) - } - item.data.Properties[key] = value -} - -type EventTelemetry struct { - BaseTelemetry - data *eventData -} - -func NewEventTelemetry(name string) *EventTelemetry { - now := time.Now() - data := &eventData{ - Name: name, - } - - data.Ver = 2 - - item := &EventTelemetry{ - data: data, - } - - item.timestamp = now - item.context = NewItemTelemetryContext() - - return item -} - -func (item *EventTelemetry) Timestamp() time.Time { - return item.timestamp -} - -func (item *EventTelemetry) Context() TelemetryContext { - return item.context -} - -func (item *EventTelemetry) baseTypeName() string { - return "Event" -} - -func (item *EventTelemetry) baseData() Domain { - return item.data -} - -func (item *EventTelemetry) SetProperty(key, value string) { - if item.data.Properties == nil { - item.data.Properties = make(map[string]string) - } - item.data.Properties[key] = value -} - -type MetricTelemetry struct { - BaseTelemetry - data *metricData -} - -func NewMetricTelemetry(name string, value float32) *MetricTelemetry { - now := time.Now() - metric := &DataPoint{ - Name: name, - Value: value, - Count: 1, - } - - data := &metricData{ - Metrics: make([]*DataPoint, 1), - } - - data.Ver = 2 - data.Metrics[0] = metric - - item := &MetricTelemetry{ - data: data, - } - - item.timestamp = now - item.context = NewItemTelemetryContext() - - return item -} - -func (item *MetricTelemetry) Timestamp() time.Time { - return item.timestamp -} - -func (item *MetricTelemetry) Context() TelemetryContext { - return item.context -} - -func (item *MetricTelemetry) baseTypeName() string { - return "Metric" -} - -func (item *MetricTelemetry) baseData() Domain { - return item.data -} - -func (item *MetricTelemetry) SetProperty(key, value string) { - if item.data.Properties == nil { - item.data.Properties = make(map[string]string) - } - item.data.Properties[key] = value -} - -type RequestTelemetry struct { - BaseTelemetry - data *requestData -} - -func NewRequestTelemetry(name, httpMethod, url string, timestamp time.Time, duration time.Duration, responseCode string, success bool) *RequestTelemetry { - now := time.Now() - data := &requestData{ - Name: name, - StartTime: timestamp.Format(time.RFC3339Nano), - Duration: formatDuration(duration), - ResponseCode: responseCode, - Success: success, - HttpMethod: httpMethod, - Url: url, - Id: randomId(), - } - - data.Ver = 2 - - item := &RequestTelemetry{ - data: data, - } - - item.timestamp = now - item.context = NewItemTelemetryContext() - - return item -} - -func (item *RequestTelemetry) Timestamp() time.Time { - return item.timestamp -} - -func (item *RequestTelemetry) Context() TelemetryContext { - return item.context -} - -func (item *RequestTelemetry) baseTypeName() string { - return "Request" -} - -func (item *RequestTelemetry) baseData() Domain { - return item.data -} - -func (item 
*RequestTelemetry) SetProperty(key, value string) { - if item.data.Properties == nil { - item.data.Properties = make(map[string]string) - } - item.data.Properties[key] = value -} - -func formatDuration(d time.Duration) string { - ticks := int64(d/(time.Nanosecond*100)) % 10000000 - seconds := int64(d/time.Second) % 60 - minutes := int64(d/time.Minute) % 60 - hours := int64(d/time.Hour) % 24 - days := int64(d / (time.Hour * 24)) - - return fmt.Sprintf("%d.%02d:%02d:%02d.%07d", days, hours, minutes, seconds, ticks) -} diff --git a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/diagnostics.go b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/diagnostics.go deleted file mode 100644 index 7d609719e..000000000 --- a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/diagnostics.go +++ /dev/null @@ -1,64 +0,0 @@ -package appinsights - -import "fmt" - -type DiagnosticsMessageWriter interface { - Write(string) - appendListener(*diagnosticsMessageListener) -} - -type diagnosticsMessageWriter struct { - listeners []chan string -} - -type DiagnosticsMessageProcessor func(string) - -type DiagnosticsMessageListener interface { - ProcessMessages(DiagnosticsMessageProcessor) -} - -type diagnosticsMessageListener struct { - channel chan string -} - -var diagnosticsWriter *diagnosticsMessageWriter = &diagnosticsMessageWriter{ - listeners: make([]chan string, 0), -} - -func NewDiagnosticsMessageListener() DiagnosticsMessageListener { - listener := &diagnosticsMessageListener{ - channel: make(chan string), - } - - diagnosticsWriter.appendListener(listener) - - return listener -} - -func (writer *diagnosticsMessageWriter) appendListener(listener *diagnosticsMessageListener) { - writer.listeners = append(writer.listeners, listener.channel) -} - -func (writer *diagnosticsMessageWriter) Write(message string) { - for _, c := range writer.listeners { - c <- message - } -} - -func (writer *diagnosticsMessageWriter) Printf(message string, args ...interface{}) { - // Don't bother with Sprintf if nobody is listening - if writer.hasListeners() { - writer.Write(fmt.Sprintf(message, args...)) - } -} - -func (writer *diagnosticsMessageWriter) hasListeners() bool { - return len(writer.listeners) > 0 -} - -func (listener *diagnosticsMessageListener) ProcessMessages(process DiagnosticsMessageProcessor) { - for { - message := <-listener.channel - process(message) - } -} diff --git a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/inmemorychannel.go b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/inmemorychannel.go deleted file mode 100644 index 272170187..000000000 --- a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/inmemorychannel.go +++ /dev/null @@ -1,408 +0,0 @@ -package appinsights - -import ( - "sync" - "time" - - "code.cloudfoundry.org/clock" -) - -var ( - submit_retries = []time.Duration{time.Duration(10 * time.Second), time.Duration(30 * time.Second), time.Duration(60 * time.Second)} -) - -type TelemetryBufferItems []Telemetry - -type InMemoryChannel struct { - endpointAddress string - isDeveloperMode bool - collectChan chan Telemetry - controlChan chan *inMemoryChannelControl - batchSize int - batchInterval time.Duration - waitgroup sync.WaitGroup - throttle *throttleManager - transmitter transmitter -} - -type inMemoryChannelControl struct { - // If true, flush the buffer. - flush bool - - // If true, stop listening on the channel. 
(Flush is required if any events are to be sent) - stop bool - - // If stopping and flushing, this specifies whether to retry submissions on error. - retry bool - - // If retrying, what is the max time to wait before finishing up? - timeout time.Duration - - // If specified, a message will be sent on this channel when all pending telemetry items have been submitted - callback chan struct{} -} - -func NewInMemoryChannel(config *TelemetryConfiguration) *InMemoryChannel { - channel := &InMemoryChannel{ - endpointAddress: config.EndpointUrl, - collectChan: make(chan Telemetry), - controlChan: make(chan *inMemoryChannelControl), - batchSize: config.MaxBatchSize, - batchInterval: config.MaxBatchInterval, - throttle: newThrottleManager(), - transmitter: newTransmitter(config.EndpointUrl), - } - - go channel.acceptLoop() - - return channel -} - -func (channel *InMemoryChannel) EndpointAddress() string { - return channel.endpointAddress -} - -func (channel *InMemoryChannel) Send(item Telemetry) { - if item != nil && channel.collectChan != nil { - channel.collectChan <- item - } -} - -func (channel *InMemoryChannel) Flush() { - if channel.controlChan != nil { - channel.controlChan <- &inMemoryChannelControl{ - flush: true, - } - } -} - -func (channel *InMemoryChannel) Stop() { - if channel.controlChan != nil { - channel.controlChan <- &inMemoryChannelControl{ - stop: true, - } - } -} - -func (channel *InMemoryChannel) IsThrottled() bool { - return channel.throttle != nil && channel.throttle.IsThrottled() -} - -func (channel *InMemoryChannel) Close(timeout ...time.Duration) <-chan struct{} { - if channel.controlChan != nil { - callback := make(chan struct{}) - - ctl := &inMemoryChannelControl{ - stop: true, - flush: true, - retry: false, - callback: callback, - } - - if len(timeout) > 0 { - ctl.retry = true - ctl.timeout = timeout[0] - } - - channel.controlChan <- ctl - - return callback - } else { - return nil - } -} - -func (channel *InMemoryChannel) acceptLoop() { - channelState := newInMemoryChannelState(channel) - - for !channelState.stopping { - channelState.start() - } - - channelState.stop() -} - -// Data shared between parts of a channel -type inMemoryChannelState struct { - channel *InMemoryChannel - stopping bool - buffer TelemetryBufferItems - retry bool - retryTimeout time.Duration - callback chan struct{} - timer clock.Timer -} - -func newInMemoryChannelState(channel *InMemoryChannel) *inMemoryChannelState { - return &inMemoryChannelState{ - channel: channel, - buffer: make(TelemetryBufferItems, 0, 16), - stopping: false, - timer: currentClock.NewTimer(channel.batchInterval), - } -} - -// Part of channel accept loop: Initialize buffer and accept first message, handle controls. -func (state *inMemoryChannelState) start() bool { - if len(state.buffer) > 16 { - // Start out with the size of the previous buffer - state.buffer = make(TelemetryBufferItems, 0, cap(state.buffer)) - } else if len(state.buffer) > 0 { - // Start out with at least 16 slots - state.buffer = make(TelemetryBufferItems, 0, 16) - } - - // Wait for an event - select { - case event := <-state.channel.collectChan: - if event == nil { - // Channel closed? Not intercepted by Send()? 
- panic("Received nil event") - } - - state.buffer = append(state.buffer, event) - - case ctl := <-state.channel.controlChan: - // The buffer is empty, so there would be no point in flushing - state.channel.signalWhenDone(ctl.callback) - - if ctl.stop { - state.stopping = true - return false - } - } - - if len(state.buffer) == 0 { - return true - } - - return state.waitToSend() -} - -// Part of channel accept loop: Wait for buffer to fill, timeout to expire, or flush -func (state *inMemoryChannelState) waitToSend() bool { - // Things that are used by the sender if we receive a control message - state.retryTimeout = 0 - state.retry = true - state.callback = nil - - // Delay until timeout passes or buffer fills up - state.timer.Reset(state.channel.batchInterval) - for { - select { - case event := <-state.channel.collectChan: - if event == nil { - // Channel closed? Not intercepted by Send()? - panic("Received nil event") - } - - state.buffer = append(state.buffer, event) - if len(state.buffer) >= state.channel.batchSize { - return state.send() - } - - case ctl := <-state.channel.controlChan: - if ctl.stop { - state.stopping = true - state.retry = ctl.retry - if !ctl.flush { - // No flush? Just exit. - state.channel.signalWhenDone(ctl.callback) - return false - } - } - - if ctl.flush { - state.retryTimeout = ctl.timeout - state.callback = ctl.callback - return state.send() - } - - case _ = <-state.timer.C(): - // Timeout expired - return state.send() - } - } -} - -// Part of channel accept loop: Check and wait on throttle, submit pending telemetry -func (state *inMemoryChannelState) send() bool { - // Hold up transmission if we're being throttled - if !state.stopping && state.channel.throttle.IsThrottled() { - if !state.waitThrottle() { - // Stopped - return false - } - } - - // Send - if len(state.buffer) > 0 { - state.channel.waitgroup.Add(1) - - // If we have a callback, wait on the waitgroup now that it's - // incremented. - state.channel.signalWhenDone(state.callback) - - go func(buffer TelemetryBufferItems, retry bool, retryTimeout time.Duration) { - defer state.channel.waitgroup.Done() - state.channel.transmitRetry(buffer, retry, retryTimeout) - }(state.buffer, state.retry, state.retryTimeout) - } else if state.callback != nil { - state.channel.signalWhenDone(state.callback) - } - - return true -} - -// Part of channel accept loop: Wait for throttle to expire while dropping messages -func (state *inMemoryChannelState) waitThrottle() bool { - // Channel is currently throttled. Once the buffer fills, messages will - // be lost... If we're exiting, then we'll just try to submit anyway. That - // request may be throttled and transmitRetry will perform the backoff correctly. - - diagnosticsWriter.Write("Channel is throttled, events may be dropped.") - throttleDone := state.channel.throttle.NotifyWhenReady() - dropped := 0 - - defer diagnosticsWriter.Printf("Channel dropped %d events while throttled", dropped) - - for { - select { - case <-throttleDone: - close(throttleDone) - return true - - case event := <-state.channel.collectChan: - // If there's still room in the buffer, then go ahead and add it. 
- if len(state.buffer) < state.channel.batchSize { - state.buffer = append(state.buffer, event) - } else { - if dropped == 0 { - diagnosticsWriter.Write("Buffer is full, dropping further events.") - } - - dropped++ - } - - case ctl := <-state.channel.controlChan: - if ctl.stop { - state.stopping = true - state.retry = ctl.retry - if !ctl.flush { - state.channel.signalWhenDone(ctl.callback) - return false - } else { - // Make an exception when stopping - return true - } - } - - // Cannot flush - // TODO: Figure out what to do about callback? - if ctl.flush { - state.channel.signalWhenDone(ctl.callback) - } - } - } -} - -// Part of channel accept loop: Clean up and close telemetry channel -func (state *inMemoryChannelState) stop() { - close(state.channel.collectChan) - close(state.channel.controlChan) - - state.channel.collectChan = nil - state.channel.controlChan = nil - - // Throttle can't close until transmitters are done using it. - state.channel.waitgroup.Wait() - state.channel.throttle.Stop() - - state.channel.throttle = nil -} - -func (channel *InMemoryChannel) transmitRetry(items TelemetryBufferItems, retry bool, retryTimeout time.Duration) { - payload := items.serialize() - retryTimeRemaining := retryTimeout - - for _, wait := range submit_retries { - result, err := channel.transmitter.Transmit(payload, items) - if err == nil && result != nil && result.IsSuccess() { - return - } - - if !retry { - diagnosticsWriter.Write("Refusing to retry telemetry submission (retry==false)") - return - } - - // Check for success, determine if we need to retry anything - if result != nil { - if result.CanRetry() { - // Filter down to failed items - payload, items = result.GetRetryItems(payload, items) - if len(payload) == 0 || len(items) == 0 { - return - } - } else { - diagnosticsWriter.Write("Cannot retry telemetry submission") - return - } - - // Check for throttling - if result.IsThrottled() { - if result.retryAfter != nil { - diagnosticsWriter.Printf("Channel is throttled until %s", *result.retryAfter) - channel.throttle.RetryAfter(*result.retryAfter) - } else { - // TODO: Pick a time - } - } - } - - if retryTimeout > 0 { - // We're on a time schedule here. Make sure we don't try longer - // than we have been allowed. - if retryTimeRemaining < wait { - // One more chance left -- we'll wait the max time we can - // and then retry on the way out. 
- currentClock.Sleep(retryTimeRemaining) - break - } else { - // Still have time left to go through the rest of the regular - // retry schedule - retryTimeRemaining -= wait - } - } - - diagnosticsWriter.Printf("Waiting %s to retry submission", wait) - currentClock.Sleep(wait) - - // Wait if the channel is throttled and we're not on a schedule - if channel.IsThrottled() && retryTimeout == 0 { - diagnosticsWriter.Printf("Channel is throttled; extending wait time.") - ch := channel.throttle.NotifyWhenReady() - result := <-ch - close(ch) - - if !result { - return - } - } - } - - // One final try - _, err := channel.transmitter.Transmit(payload, items) - if err != nil { - diagnosticsWriter.Write("Gave up transmitting payload; exhausted retries") - } -} - -func (channel *InMemoryChannel) signalWhenDone(callback chan struct{}) { - if callback != nil { - go func() { - channel.waitgroup.Wait() - close(callback) - }() - } -} diff --git a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/jsonserializer.go b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/jsonserializer.go deleted file mode 100644 index 326812310..000000000 --- a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/jsonserializer.go +++ /dev/null @@ -1,45 +0,0 @@ -package appinsights - -import ( - "bytes" - "encoding/json" - "fmt" - "time" -) - -func (items TelemetryBufferItems) serialize() []byte { - var result bytes.Buffer - encoder := json.NewEncoder(&result) - - for _, item := range items { - end := result.Len() - if err := encoder.Encode(prepare(item)); err != nil { - diagnosticsWriter.Write(fmt.Sprintf("Telemetry item failed to serialize: %s", err.Error())) - result.Truncate(end) - } - } - - return result.Bytes() -} - -func prepare(item Telemetry) *envelope { - data := &data{ - BaseType: item.baseTypeName() + "Data", - BaseData: item.baseData(), - } - - context := item.Context() - - envelope := &envelope{ - Name: "Microsoft.ApplicationInsights." + item.baseTypeName(), - Time: item.Timestamp().Format(time.RFC3339), - IKey: context.InstrumentationKey(), - Data: data, - } - - if tcontext, ok := context.(*telemetryContext); ok { - envelope.Tags = tcontext.tags - } - - return envelope -} diff --git a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/package.go b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/package.go deleted file mode 100644 index f498c1be7..000000000 --- a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/package.go +++ /dev/null @@ -1,8 +0,0 @@ -// Package appinsights provides an interface to submit telemetry to Application Insights. -// See more at https://azure.microsoft.com/en-us/services/application-insights/ -package appinsights - -const ( - sdkName = "go" - Version = "0.3.1-pre" -) diff --git a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/telemetrychannel.go b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/telemetrychannel.go deleted file mode 100644 index 01b3914a0..000000000 --- a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/telemetrychannel.go +++ /dev/null @@ -1,47 +0,0 @@ -package appinsights - -import "time" - -// Implementations of TelemetryChannel are responsible for queueing and -// periodically submitting telemetry items. 
-type TelemetryChannel interface { - // The address of the endpoint to which telemetry is sent - EndpointAddress() string - - // Queues a single telemetry item - Send(Telemetry) - - // Forces the current queue to be sent - Flush() - - // Tears down the submission goroutines, closes internal channels. - // Any telemetry waiting to be sent is discarded. Further calls to - // Send() have undefined behavior. This is a more abrupt version of - // Close(). - Stop() - - // Returns true if this channel has been throttled by the data - // collector. - IsThrottled() bool - - // Flushes and tears down the submission goroutine and closes - // internal channels. Returns a channel that is closed when all - // pending telemetry items have been submitted and it is safe to - // shut down without losing telemetry. - // - // If retryTimeout is specified and non-zero, then failed - // submissions will be retried until one succeeds or the timeout - // expires, whichever occurs first. A retryTimeout of zero - // indicates that failed submissions will be retried as usual. An - // omitted retryTimeout indicates that submissions should not be - // retried if they fail. - // - // Note that the returned channel may not be closed before - // retryTimeout even if it is specified. This is because - // retryTimeout only applies to the latest telemetry buffer. This - // may be typical for applications that submit a large amount of - // telemetry or are prone to being throttled. When exiting, you - // should select on the result channel and your own timer to avoid - // long delays. - Close(retryTimeout ...time.Duration) <-chan struct{} -} diff --git a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/telemetrycontext.go b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/telemetrycontext.go deleted file mode 100644 index 7a081e033..000000000 --- a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/telemetrycontext.go +++ /dev/null @@ -1,400 +0,0 @@ -package appinsights - -import ( - "os" - "runtime" - "strconv" -) - -type TelemetryContext interface { - InstrumentationKey() string - loadDeviceContext() - - Component() ComponentContext - Device() DeviceContext - Cloud() CloudContext - Session() SessionContext - User() UserContext - Operation() OperationContext - Location() LocationContext -} - -type telemetryContext struct { - iKey string - tags map[string]string -} - -type ComponentContext interface { - GetVersion() string - SetVersion(string) -} - -type DeviceContext interface { - GetType() string - SetType(string) - GetId() string - SetId(string) - GetOperatingSystem() string - SetOperatingSystem(string) - GetOemName() string - SetOemName(string) - GetModel() string - SetModel(string) - GetNetworkType() string - SetNetworkType(string) - GetScreenResolution() string - SetScreenResolution(string) - GetLanguage() string - SetLanguage(string) -} - -type CloudContext interface { - GetRoleName() string - SetRoleName(string) - GetRoleInstance() string - SetRoleInstance(string) -} - -type SessionContext interface { - GetId() string - SetId(string) - GetIsFirst() bool - SetIsFirst(bool) -} - -type UserContext interface { - GetId() string - SetId(string) - GetAccountId() string - SetAccountId(string) - GetUserAgent() string - SetUserAgent(string) - GetAuthenticatedUserId() string - SetAuthenticatedUserId(string) -} - -type OperationContext interface { - GetId() string - SetId(string) - GetParentId() string - SetParentId(string) - GetCorrelationVector() string - SetCorrelationVector(string) - 
GetName() string - SetName(string) - GetSyntheticSource() string - SetSyntheticSource(string) -} - -type LocationContext interface { - GetIp() string - SetIp(string) -} - -func NewItemTelemetryContext() TelemetryContext { - context := &telemetryContext{ - tags: make(map[string]string), - } - return context -} - -func NewClientTelemetryContext() TelemetryContext { - context := &telemetryContext{ - tags: make(map[string]string), - } - context.loadDeviceContext() - context.loadInternalContext() - return context -} - -func (context *telemetryContext) InstrumentationKey() string { - return context.iKey -} - -func (context *telemetryContext) loadDeviceContext() { - hostname, err := os.Hostname() - if err == nil { - context.tags[DeviceId] = hostname - context.tags[DeviceMachineName] = hostname - context.tags[DeviceRoleInstance] = hostname - } - context.tags[DeviceOS] = runtime.GOOS -} - -func (context *telemetryContext) loadInternalContext() { - context.tags[InternalSdkVersion] = sdkName + ":" + Version -} - -func (context *telemetryContext) Component() ComponentContext { - return &componentContext{context: context} -} - -func (context *telemetryContext) Device() DeviceContext { - return &deviceContext{context: context} -} - -func (context *telemetryContext) Cloud() CloudContext { - return &cloudContext{context: context} -} - -func (context *telemetryContext) Session() SessionContext { - return &sessionContext{context: context} -} - -func (context *telemetryContext) User() UserContext { - return &userContext{context: context} -} - -func (context *telemetryContext) Operation() OperationContext { - return &operationContext{context: context} -} - -func (context *telemetryContext) Location() LocationContext { - return &locationContext{context: context} -} - -func (context *telemetryContext) getTagString(key ContextTagKeys) string { - if val, ok := context.tags[string(key)]; ok { - return val - } - - return "" -} - -func (context *telemetryContext) setTagString(key ContextTagKeys, value string) { - if value != "" { - context.tags[string(key)] = value - } else { - delete(context.tags, string(key)) - } -} - -func (context *telemetryContext) getTagBool(key ContextTagKeys) bool { - if val, ok := context.tags[string(key)]; ok { - if b, err := strconv.ParseBool(val); err != nil { - return b - } - } - - return false -} - -func (context *telemetryContext) setTagBool(key ContextTagKeys, value bool) { - if value { - context.tags[string(key)] = "true" - } else { - delete(context.tags, string(key)) - } -} - -type componentContext struct { - context *telemetryContext -} - -type deviceContext struct { - context *telemetryContext -} - -type cloudContext struct { - context *telemetryContext -} - -type sessionContext struct { - context *telemetryContext -} - -type userContext struct { - context *telemetryContext -} - -type operationContext struct { - context *telemetryContext -} - -type locationContext struct { - context *telemetryContext -} - -func (context *componentContext) GetVersion() string { - return context.context.getTagString(ApplicationVersion) -} - -func (context *componentContext) SetVersion(value string) { - context.context.setTagString(ApplicationVersion, value) -} - -func (context *deviceContext) GetType() string { - return context.context.getTagString(DeviceType) -} - -func (context *deviceContext) SetType(value string) { - context.context.setTagString(DeviceType, value) -} - -func (context *deviceContext) GetId() string { - return context.context.getTagString(DeviceId) -} - -func (context 
*deviceContext) SetId(value string) { - context.context.setTagString(DeviceId, value) -} - -func (context *deviceContext) GetOperatingSystem() string { - return context.context.getTagString(DeviceOSVersion) -} - -func (context *deviceContext) SetOperatingSystem(value string) { - context.context.setTagString(DeviceOSVersion, value) -} - -func (context *deviceContext) GetOemName() string { - return context.context.getTagString(DeviceOEMName) -} - -func (context *deviceContext) SetOemName(value string) { - context.context.setTagString(DeviceOEMName, value) -} - -func (context *deviceContext) GetModel() string { - return context.context.getTagString(DeviceModel) -} - -func (context *deviceContext) SetModel(value string) { - context.context.setTagString(DeviceModel, value) -} - -func (context *deviceContext) GetNetworkType() string { - return context.context.getTagString(DeviceNetwork) -} - -func (context *deviceContext) SetNetworkType(value string) { - context.context.setTagString(DeviceNetwork, value) -} - -func (context *deviceContext) GetScreenResolution() string { - return context.context.getTagString(DeviceScreenResolution) -} - -func (context *deviceContext) SetScreenResolution(value string) { - context.context.setTagString(DeviceScreenResolution, value) -} - -func (context *deviceContext) GetLanguage() string { - return context.context.getTagString(DeviceLanguage) -} - -func (context *deviceContext) SetLanguage(value string) { - context.context.setTagString(DeviceLanguage, value) -} - -func (context *cloudContext) GetRoleName() string { - return context.context.getTagString(CloudRole) -} - -func (context *cloudContext) SetRoleName(value string) { - context.context.setTagString(CloudRole, value) -} - -func (context *cloudContext) GetRoleInstance() string { - return context.context.getTagString(CloudRoleInstance) -} - -func (context *cloudContext) SetRoleInstance(value string) { - context.context.setTagString(CloudRoleInstance, value) -} - -func (context *sessionContext) GetId() string { - return context.context.getTagString(SessionId) -} - -func (context *sessionContext) SetId(value string) { - context.context.setTagString(SessionId, value) -} - -func (context *sessionContext) GetIsFirst() bool { - return context.context.getTagBool(SessionIsFirst) -} - -func (context *sessionContext) SetIsFirst(value bool) { - context.context.setTagBool(SessionIsFirst, value) -} - -func (context *userContext) GetId() string { - return context.context.getTagString(UserId) -} - -func (context *userContext) SetId(value string) { - context.context.setTagString(UserId, value) -} - -func (context *userContext) GetAccountId() string { - return context.context.getTagString(UserAccountId) -} - -func (context *userContext) SetAccountId(value string) { - context.context.setTagString(UserAccountId, value) -} - -func (context *userContext) GetUserAgent() string { - return context.context.getTagString(UserAgent) -} - -func (context *userContext) SetUserAgent(value string) { - context.context.setTagString(UserAgent, value) -} - -func (context *userContext) GetAuthenticatedUserId() string { - return context.context.getTagString(UserAuthUserId) -} - -func (context *userContext) SetAuthenticatedUserId(value string) { - context.context.setTagString(UserAuthUserId, value) -} - -func (context *operationContext) GetId() string { - return context.context.getTagString(OperationId) -} - -func (context *operationContext) SetId(value string) { - context.context.setTagString(OperationId, value) -} - -func (context 
*operationContext) GetParentId() string { - return context.context.getTagString(OperationParentId) -} - -func (context *operationContext) SetParentId(value string) { - context.context.setTagString(OperationParentId, value) -} - -func (context *operationContext) GetCorrelationVector() string { - return context.context.getTagString(OperationCorrelationVector) -} - -func (context *operationContext) SetCorrelationVector(value string) { - context.context.setTagString(OperationCorrelationVector, value) -} - -func (context *operationContext) GetName() string { - return context.context.getTagString(OperationName) -} - -func (context *operationContext) SetName(value string) { - context.context.setTagString(OperationName, value) -} - -func (context *operationContext) GetSyntheticSource() string { - return context.context.getTagString(OperationSyntheticSource) -} - -func (context *operationContext) SetSyntheticSource(value string) { - context.context.setTagString(OperationSyntheticSource, value) -} - -func (context *locationContext) GetIp() string { - return context.context.getTagString(LocationIp) -} - -func (context *locationContext) SetIp(value string) { - context.context.setTagString(LocationIp, value) -} diff --git a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/throttle.go b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/throttle.go deleted file mode 100644 index 2c85800d1..000000000 --- a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/throttle.go +++ /dev/null @@ -1,144 +0,0 @@ -package appinsights - -import ( - "time" -) - -type throttleManager struct { - msgs chan *throttleMessage -} - -type throttleMessage struct { - query bool - wait bool - throttle bool - stop bool - timestamp time.Time - result chan bool -} - -func newThrottleManager() *throttleManager { - result := &throttleManager{ - msgs: make(chan *throttleMessage), - } - - go result.run() - return result -} - -func (throttle *throttleManager) RetryAfter(t time.Time) { - throttle.msgs <- &throttleMessage{ - throttle: true, - timestamp: t, - } -} - -func (throttle *throttleManager) IsThrottled() bool { - ch := make(chan bool) - throttle.msgs <- &throttleMessage{ - query: true, - result: ch, - } - - result := <-ch - close(ch) - return result -} - -func (throttle *throttleManager) NotifyWhenReady() chan bool { - result := make(chan bool, 1) - throttle.msgs <- &throttleMessage{ - wait: true, - result: result, - } - - return result -} - -func (throttle *throttleManager) Stop() { - result := make(chan bool) - throttle.msgs <- &throttleMessage{ - stop: true, - result: result, - } - - <-result - close(result) -} - -func (throttle *throttleManager) run() { - for { - throttledUntil, ok := throttle.waitForThrottle() - if !ok { - break - } - - if !throttle.waitForReady(throttledUntil) { - break - } - } - - close(throttle.msgs) -} - -func (throttle *throttleManager) waitForThrottle() (time.Time, bool) { - for { - msg := <-throttle.msgs - if msg.query { - msg.result <- false - } else if msg.wait { - msg.result <- true - } else if msg.stop { - return time.Time{}, false - } else if msg.throttle { - return msg.timestamp, true - } - } -} - -func (throttle *throttleManager) waitForReady(throttledUntil time.Time) bool { - duration := throttledUntil.Sub(currentClock.Now()) - if duration <= 0 { - return true - } - - var notify []chan bool - - // --- Throttled and waiting --- - t := currentClock.NewTimer(duration) - - for { - select { - case <-t.C(): - for _, n := range notify { - n <- true - } - - return 
true - case msg := <-throttle.msgs: - if msg.query { - msg.result <- true - } else if msg.wait { - notify = append(notify, msg.result) - } else if msg.stop { - for _, n := range notify { - n <- false - } - - msg.result <- true - - return false - } else if msg.throttle { - if msg.timestamp.After(throttledUntil) { - throttledUntil = msg.timestamp - - if !t.Stop() { - <-t.C() - } - - t.Reset(throttledUntil.Sub(currentClock.Now())) - } - } - } - } -} diff --git a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/transmitter.go b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/transmitter.go deleted file mode 100644 index 0aac35265..000000000 --- a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/transmitter.go +++ /dev/null @@ -1,237 +0,0 @@ -package appinsights - -import ( - "bytes" - "compress/gzip" - "encoding/json" - "io/ioutil" - "net/http" - "sort" - "time" -) - -type transmitter interface { - Transmit(payload []byte, items TelemetryBufferItems) (*transmissionResult, error) -} - -type httpTransmitter struct { - endpoint string -} - -type transmissionResult struct { - statusCode int - retryAfter *time.Time - response *backendResponse -} - -// Structures returned by data collector -type backendResponse struct { - ItemsReceived int `json:"itemsReceived"` - ItemsAccepted int `json:"itemsAccepted"` - Errors itemTransmissionResults `json:"errors"` -} - -// This needs to be its own type because it implements sort.Interface -type itemTransmissionResults []*itemTransmissionResult - -type itemTransmissionResult struct { - Index int `json:"index"` - StatusCode int `json:"statusCode"` - Message string `json:"message"` -} - -const ( - successResponse = 200 - partialSuccessResponse = 206 - requestTimeoutResponse = 408 - tooManyRequestsResponse = 429 - tooManyRequestsOverExtendedTimeResponse = 439 - errorResponse = 500 - serviceUnavailableResponse = 503 -) - -func newTransmitter(endpointAddress string) transmitter { - return &httpTransmitter{endpointAddress} -} - -func (transmitter *httpTransmitter) Transmit(payload []byte, items TelemetryBufferItems) (*transmissionResult, error) { - diagnosticsWriter.Printf("----------- Transmitting %d items ---------", len(items)) - startTime := time.Now() - - // Compress the payload - var postBody bytes.Buffer - gzipWriter := gzip.NewWriter(&postBody) - if _, err := gzipWriter.Write(payload); err != nil { - diagnosticsWriter.Printf("Failed to compress the payload: %s", err.Error()) - gzipWriter.Close() - return nil, err - } - - gzipWriter.Close() - - req, err := http.NewRequest("POST", transmitter.endpoint, &postBody) - if err != nil { - return nil, err - } - - req.Header.Set("Content-Encoding", "gzip") - req.Header.Set("Content-Type", "application/x-json-stream") - req.Header.Set("Accept-Encoding", "gzip, deflate") - - client := http.DefaultClient - resp, err := client.Do(req) - if err != nil { - diagnosticsWriter.Printf("Failed to transmit telemetry: %s", err.Error()) - return nil, err - } - - defer resp.Body.Close() - - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - diagnosticsWriter.Printf("Failed to read response from server: %s", err.Error()) - return nil, err - } - - duration := time.Since(startTime) - - result := &transmissionResult{statusCode: resp.StatusCode} - - // Grab Retry-After header - if retryAfterValue, ok := resp.Header[http.CanonicalHeaderKey("Retry-After")]; ok && len(retryAfterValue) == 1 { - if retryAfterTime, err := time.Parse(time.RFC1123, retryAfterValue[0]); err == nil { - 
result.retryAfter = &retryAfterTime - } - } - - // Parse body, if possible - response := &backendResponse{} - if err := json.Unmarshal(body, &response); err == nil { - result.response = response - } - - // Write diagnostics - if diagnosticsWriter.hasListeners() { - diagnosticsWriter.Printf("Telemetry transmitted in %s", duration) - diagnosticsWriter.Printf("Response: %d", result.statusCode) - if result.response != nil { - diagnosticsWriter.Printf("Items accepted/received: %d/%d", result.response.ItemsAccepted, result.response.ItemsReceived) - if len(result.response.Errors) > 0 { - diagnosticsWriter.Printf("Errors:") - for _, err := range result.response.Errors { - if err.Index < len(items) { - diagnosticsWriter.Printf("#%d - %d %s", err.Index, err.StatusCode, err.Message) - diagnosticsWriter.Printf("Telemetry item:\n\t%s", err.Index, string(items[err.Index:err.Index+1].serialize())) - } - } - } - } - } - - return result, nil -} - -func (result *transmissionResult) IsSuccess() bool { - return result.statusCode == successResponse || - // Partial response but all items accepted - (result.statusCode == partialSuccessResponse && - result.response != nil && - result.response.ItemsReceived == result.response.ItemsAccepted) -} - -func (result *transmissionResult) IsFailure() bool { - return result.statusCode != successResponse && result.statusCode != partialSuccessResponse -} - -func (result *transmissionResult) CanRetry() bool { - if result.IsSuccess() { - return false - } - - return result.statusCode == partialSuccessResponse || - result.retryAfter != nil || - (result.statusCode == requestTimeoutResponse || - result.statusCode == serviceUnavailableResponse || - result.statusCode == errorResponse || - result.statusCode == tooManyRequestsResponse || - result.statusCode == tooManyRequestsOverExtendedTimeResponse) -} - -func (result *transmissionResult) IsPartialSuccess() bool { - return result.statusCode == partialSuccessResponse && - result.response != nil && - result.response.ItemsReceived != result.response.ItemsAccepted -} - -func (result *transmissionResult) IsThrottled() bool { - return result.statusCode == tooManyRequestsResponse || - result.statusCode == tooManyRequestsOverExtendedTimeResponse || - result.retryAfter != nil -} - -func (result *itemTransmissionResult) CanRetry() bool { - return result.StatusCode == requestTimeoutResponse || - result.StatusCode == serviceUnavailableResponse || - result.StatusCode == errorResponse || - result.StatusCode == tooManyRequestsResponse || - result.StatusCode == tooManyRequestsOverExtendedTimeResponse -} - -func (result *transmissionResult) GetRetryItems(payload []byte, items TelemetryBufferItems) ([]byte, TelemetryBufferItems) { - if result.statusCode == partialSuccessResponse && result.response != nil { - // Make sure errors are ordered by index - sort.Sort(result.response.Errors) - - var resultPayload bytes.Buffer - resultItems := make(TelemetryBufferItems, 0) - ptr := 0 - idx := 0 - - // Find each retryable error - for _, responseResult := range result.response.Errors { - if responseResult.CanRetry() { - // Advance ptr to start of desired line - for ; idx < responseResult.Index && ptr < len(payload); ptr++ { - if payload[ptr] == '\n' { - idx++ - } - } - - startPtr := ptr - - // Read to end of line - for ; idx == responseResult.Index && ptr < len(payload); ptr++ { - if payload[ptr] == '\n' { - idx++ - } - } - - // Copy item into output buffer - resultPayload.Write(payload[startPtr:ptr]) - resultItems = append(resultItems, 
items[responseResult.Index]) - } - } - - return resultPayload.Bytes(), resultItems - } else if result.CanRetry() { - return payload, items - } else { - return payload[:0], items[:0] - } -} - -// sort.Interface implementation for Errors[] list - -func (results itemTransmissionResults) Len() int { - return len(results) -} - -func (results itemTransmissionResults) Less(i, j int) bool { - return results[i].Index < results[j].Index -} - -func (results itemTransmissionResults) Swap(i, j int) { - tmp := results[i] - results[i] = results[j] - results[j] = tmp -} diff --git a/vendor/github.com/containous/alice/LICENSE b/vendor/github.com/containous/alice/LICENSE new file mode 100644 index 000000000..0d0d352ec --- /dev/null +++ b/vendor/github.com/containous/alice/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2014 Justinas Stankevicius + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/containous/alice/chain.go b/vendor/github.com/containous/alice/chain.go new file mode 100644 index 000000000..3bd132120 --- /dev/null +++ b/vendor/github.com/containous/alice/chain.go @@ -0,0 +1,116 @@ +// Package alice provides a convenient way to chain http handlers. +package alice + +import "net/http" + +// Constructor A constructor for a piece of middleware. +// Some middleware use this constructor out of the box, +// so in most cases you can just pass somepackage.New +type Constructor func(http.Handler) (http.Handler, error) + +// Chain acts as a list of http.Handler constructors. +// Chain is effectively immutable: +// once created, it will always hold +// the same set of constructors in the same order. +type Chain struct { + constructors []Constructor +} + +// New creates a new chain, +// memorizing the given list of middleware constructors. +// New serves no other function, +// constructors are only called upon a call to Then(). +func New(constructors ...Constructor) Chain { + return Chain{constructors: constructors} +} + +// Then chains the middleware and returns the final http.Handler. +// New(m1, m2, m3).Then(h) +// is equivalent to: +// m1(m2(m3(h))) +// When the request comes in, it will be passed to m1, then m2, then m3 +// and finally, the given handler +// (assuming every middleware calls the following one). +// +// A chain can be safely reused by calling Then() several times. 
+// stdStack := alice.New(ratelimitHandler, csrfHandler) +// indexPipe = stdStack.Then(indexHandler) +// authPipe = stdStack.Then(authHandler) +// Note that constructors are called on every call to Then() +// and thus several instances of the same middleware will be created +// when a chain is reused in this way. +// For proper middleware, this should cause no problems. +// +// Then() treats nil as http.DefaultServeMux. +func (c Chain) Then(h http.Handler) (http.Handler, error) { + if h == nil { + h = http.DefaultServeMux + } + + for i := range c.constructors { + handler, err := c.constructors[len(c.constructors)-1-i](h) + if err != nil { + return nil, err + } + h = handler + } + + return h, nil +} + +// ThenFunc works identically to Then, but takes +// a HandlerFunc instead of a Handler. +// +// The following two statements are equivalent: +// c.Then(http.HandlerFunc(fn)) +// c.ThenFunc(fn) +// +// ThenFunc provides all the guarantees of Then. +func (c Chain) ThenFunc(fn http.HandlerFunc) (http.Handler, error) { + if fn == nil { + return c.Then(nil) + } + return c.Then(fn) +} + +// Append extends a chain, adding the specified constructors +// as the last ones in the request flow. +// +// Append returns a new chain, leaving the original one untouched. +// +// stdChain := alice.New(m1, m2) +// extChain := stdChain.Append(m3, m4) +// // requests in stdChain go m1 -> m2 +// // requests in extChain go m1 -> m2 -> m3 -> m4 +func (c Chain) Append(constructors ...Constructor) Chain { + newCons := make([]Constructor, 0, len(c.constructors)+len(constructors)) + newCons = append(newCons, c.constructors...) + newCons = append(newCons, constructors...) + + return Chain{newCons} +} + +// Extend extends a chain by adding the specified chain +// as the last one in the request flow. +// +// Extend returns a new chain, leaving the original one untouched. +// +// stdChain := alice.New(m1, m2) +// ext1Chain := alice.New(m3, m4) +// ext2Chain := stdChain.Extend(ext1Chain) +// // requests in stdChain go m1 -> m2 +// // requests in ext1Chain go m3 -> m4 +// // requests in ext2Chain go m1 -> m2 -> m3 -> m4 +// +// Another example: +// aHtmlAfterNosurf := alice.New(m2) +// aHtml := alice.New(m1, func(h http.Handler) http.Handler { +// csrf := nosurf.New(h) +// csrf.SetFailureHandler(aHtmlAfterNosurf.ThenFunc(csrfFail)) +// return csrf +// }).Extend(aHtmlAfterNosurf) +// // requests to aHtml hitting nosurfs success handler go m1 -> nosurf -> m2 -> target-handler +// // requests to aHtml hitting nosurfs failure handler go m1 -> nosurf -> m2 -> csrfFail +func (c Chain) Extend(chain Chain) Chain { + return c.Append(chain.constructors...) +} diff --git a/vendor/github.com/containous/traefik-extra-service-fabric/LICENSE b/vendor/github.com/containous/traefik-extra-service-fabric/LICENSE deleted file mode 100644 index 63ea16409..000000000 --- a/vendor/github.com/containous/traefik-extra-service-fabric/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2017 Containous - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/containous/traefik-extra-service-fabric/servicefabric.go b/vendor/github.com/containous/traefik-extra-service-fabric/servicefabric.go deleted file mode 100644 index bb7c3fb37..000000000 --- a/vendor/github.com/containous/traefik-extra-service-fabric/servicefabric.go +++ /dev/null @@ -1,306 +0,0 @@ -package servicefabric - -import ( - "encoding/json" - "errors" - "net/http" - "strings" - "time" - - "github.com/cenk/backoff" - "github.com/containous/flaeg" - "github.com/containous/traefik/job" - "github.com/containous/traefik/log" - "github.com/containous/traefik/provider" - "github.com/containous/traefik/provider/label" - "github.com/containous/traefik/safe" - "github.com/containous/traefik/types" - "github.com/jjcollinge/logrus-appinsights" - sf "github.com/jjcollinge/servicefabric" -) - -var _ provider.Provider = (*Provider)(nil) - -const traefikServiceFabricExtensionKey = "Traefik" - -const ( - kindStateful = "Stateful" - kindStateless = "Stateless" -) - -// Provider holds for configuration for the provider -type Provider struct { - provider.BaseProvider `mapstructure:",squash"` - ClusterManagementURL string `description:"Service Fabric API endpoint"` - APIVersion string `description:"Service Fabric API version" export:"true"` - RefreshSeconds flaeg.Duration `description:"Polling interval (in seconds)" export:"true"` - TLS *types.ClientTLS `description:"Enable TLS support" export:"true"` - AppInsightsClientName string `description:"The client name, Identifies the cloud instance"` - AppInsightsKey string `description:"Application Insights Instrumentation Key"` - AppInsightsBatchSize int `description:"Number of trace lines per batch, optional"` - AppInsightsInterval flaeg.Duration `description:"The interval for sending data to Application Insights, optional"` - sfClient sfClient -} - -// Init the provider -func (p *Provider) Init(constraints types.Constraints) error { - err := p.BaseProvider.Init(constraints) - if err != nil { - return err - } - - if p.APIVersion == "" { - p.APIVersion = sf.DefaultAPIVersion - } - - tlsConfig, err := p.TLS.CreateTLSConfig() - if err != nil { - return err - } - - p.sfClient, err = sf.NewClient(http.DefaultClient, p.ClusterManagementURL, p.APIVersion, tlsConfig) - if err != nil { - return err - } - - if p.RefreshSeconds <= 0 { - p.RefreshSeconds = flaeg.Duration(10 * time.Second) - } - - if p.AppInsightsClientName != "" && p.AppInsightsKey != "" { - if p.AppInsightsBatchSize == 0 { - p.AppInsightsBatchSize = 10 - } - if p.AppInsightsInterval == 0 { - p.AppInsightsInterval = flaeg.Duration(5 * time.Second) - } - createAppInsightsHook(p.AppInsightsClientName, p.AppInsightsKey, p.AppInsightsBatchSize, p.AppInsightsInterval) - } - return nil -} - -// Provide allows the ServiceFabric provider to provide configurations to traefik -// using the given configuration channel. 
-func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error { - return p.updateConfig(configurationChan, pool, time.Duration(p.RefreshSeconds)) -} - -func (p *Provider) updateConfig(configurationChan chan<- types.ConfigMessage, pool *safe.Pool, pollInterval time.Duration) error { - pool.Go(func(stop chan bool) { - operation := func() error { - ticker := time.NewTicker(pollInterval) - for range ticker.C { - select { - case shouldStop := <-stop: - if shouldStop { - ticker.Stop() - return nil - } - default: - log.Info("Checking service fabric config") - } - - configuration, err := p.getConfiguration() - if err != nil { - return err - } - - configurationChan <- types.ConfigMessage{ - ProviderName: "servicefabric", - Configuration: configuration, - } - } - return nil - } - - notify := func(err error, time time.Duration) { - log.Errorf("Provider connection error: %v; retrying in %s", err, time) - } - err := backoff.RetryNotify(safe.OperationWithRecover(operation), job.NewBackOff(backoff.NewExponentialBackOff()), notify) - if err != nil { - log.Errorf("Cannot connect to Provider: %v", err) - } - }) - return nil -} - -func (p *Provider) getConfiguration() (*types.Configuration, error) { - services, err := getClusterServices(p.sfClient) - if err != nil { - return nil, err - } - - return p.buildConfiguration(services) -} - -func getClusterServices(sfClient sfClient) ([]ServiceItemExtended, error) { - apps, err := sfClient.GetApplications() - if err != nil { - return nil, err - } - - var results []ServiceItemExtended - for _, app := range apps.Items { - services, err := sfClient.GetServices(app.ID) - if err != nil { - return nil, err - } - - for _, service := range services.Items { - item := ServiceItemExtended{ - ServiceItem: service, - Application: app, - } - - if labels, err := getLabels(sfClient, &service, &app); err != nil { - log.Error(err) - } else { - item.Labels = labels - } - - if partitions, err := sfClient.GetPartitions(app.ID, service.ID); err != nil { - log.Error(err) - } else { - for _, partition := range partitions.Items { - partitionExt := PartitionItemExtended{PartitionItem: partition} - - if isStateful(item) { - partitionExt.Replicas = getValidReplicas(sfClient, app, service, partition) - } else if isStateless(item) { - partitionExt.Instances = getValidInstances(sfClient, app, service, partition) - } else { - log.Errorf("Unsupported service kind %s in service %s", partition.ServiceKind, service.Name) - continue - } - - item.Partitions = append(item.Partitions, partitionExt) - } - } - - results = append(results, item) - } - } - - return results, nil -} - -func getValidReplicas(sfClient sfClient, app sf.ApplicationItem, service sf.ServiceItem, partition sf.PartitionItem) []sf.ReplicaItem { - var validReplicas []sf.ReplicaItem - - if replicas, err := sfClient.GetReplicas(app.ID, service.ID, partition.PartitionInformation.ID); err != nil { - log.Error(err) - } else { - for _, instance := range replicas.Items { - if isHealthy(instance.ReplicaItemBase) && hasHTTPEndpoint(instance.ReplicaItemBase) { - validReplicas = append(validReplicas, instance) - } - } - } - return validReplicas -} - -func getValidInstances(sfClient sfClient, app sf.ApplicationItem, service sf.ServiceItem, partition sf.PartitionItem) []sf.InstanceItem { - var validInstances []sf.InstanceItem - - if instances, err := sfClient.GetInstances(app.ID, service.ID, partition.PartitionInformation.ID); err != nil { - log.Error(err) - } else { - for _, instance := range instances.Items { - if 
isHealthy(instance.ReplicaItemBase) && hasHTTPEndpoint(instance.ReplicaItemBase) { - validInstances = append(validInstances, instance) - } - } - } - return validInstances -} - -func isHealthy(instanceData *sf.ReplicaItemBase) bool { - return instanceData != nil && (instanceData.ReplicaStatus == "Ready" && instanceData.HealthState != "Error") -} - -func hasHTTPEndpoint(instanceData *sf.ReplicaItemBase) bool { - _, err := getReplicaDefaultEndpoint(instanceData) - return err == nil -} - -func getReplicaDefaultEndpoint(replicaData *sf.ReplicaItemBase) (string, error) { - endpoints, err := decodeEndpointData(replicaData.Address) - if err != nil { - return "", err - } - - var defaultHTTPEndpoint string - for _, v := range endpoints { - if strings.Contains(v, "http") { - defaultHTTPEndpoint = v - break - } - } - - if len(defaultHTTPEndpoint) == 0 { - return "", errors.New("no default endpoint found") - } - return defaultHTTPEndpoint, nil -} - -func decodeEndpointData(endpointData string) (map[string]string, error) { - var endpointsMap map[string]map[string]string - - if endpointData == "" { - return nil, errors.New("endpoint data is empty") - } - - err := json.Unmarshal([]byte(endpointData), &endpointsMap) - if err != nil { - return nil, err - } - - endpoints, endpointsExist := endpointsMap["Endpoints"] - if !endpointsExist { - return nil, errors.New("endpoint doesn't exist in endpoint data") - } - - return endpoints, nil -} - -func isStateful(service ServiceItemExtended) bool { - return service.ServiceKind == kindStateful -} - -func isStateless(service ServiceItemExtended) bool { - return service.ServiceKind == kindStateless -} - -// Return a set of labels from the Extension and Property manager -// Allow Extension labels to disable importing labels from the property manager. 
-func getLabels(sfClient sfClient, service *sf.ServiceItem, app *sf.ApplicationItem) (map[string]string, error) { - labels, err := sfClient.GetServiceExtensionMap(service, app, traefikServiceFabricExtensionKey) - if err != nil { - log.Errorf("Error retrieving serviceExtensionMap: %v", err) - return nil, err - } - - if label.GetBoolValue(labels, traefikSFEnableLabelOverrides, traefikSFEnableLabelOverridesDefault) { - if exists, properties, err := sfClient.GetProperties(service.ID); err == nil && exists { - for key, value := range properties { - labels[key] = value - } - } - } - return labels, nil -} - -func createAppInsightsHook(appInsightsClientName string, instrumentationKey string, maxBatchSize int, interval flaeg.Duration) { - hook, err := logrus_appinsights.New(appInsightsClientName, logrus_appinsights.Config{ - InstrumentationKey: instrumentationKey, - MaxBatchSize: maxBatchSize, // optional - MaxBatchInterval: time.Duration(interval), // optional - }) - if err != nil || hook == nil { - panic(err) - } - - // ignore fields - hook.AddIgnore("private") - log.AddHook(hook) -} diff --git a/vendor/github.com/containous/traefik-extra-service-fabric/servicefabric_config.go b/vendor/github.com/containous/traefik-extra-service-fabric/servicefabric_config.go deleted file mode 100644 index b3baa8346..000000000 --- a/vendor/github.com/containous/traefik-extra-service-fabric/servicefabric_config.go +++ /dev/null @@ -1,174 +0,0 @@ -package servicefabric - -import ( - "errors" - "strings" - "text/template" - - "github.com/containous/traefik/log" - "github.com/containous/traefik/provider" - "github.com/containous/traefik/provider/label" - "github.com/containous/traefik/types" - sf "github.com/jjcollinge/servicefabric" -) - -func (p *Provider) buildConfiguration(services []ServiceItemExtended) (*types.Configuration, error) { - var sfFuncMap = template.FuncMap{ - // Services - "getServices": getServices, - "hasLabel": hasService, - "getLabelValue": getServiceStringLabel, - "getLabelsWithPrefix": getServiceLabelsWithPrefix, - "isPrimary": isPrimary, - "isStateful": isStateful, - "isStateless": isStateless, - "isEnabled": getFuncBoolLabel(label.TraefikEnable, false), - "getBackendName": getBackendName, - "getDefaultEndpoint": getDefaultEndpoint, - "getNamedEndpoint": getNamedEndpoint, // FIXME unused - "getApplicationParameter": getApplicationParameter, // FIXME unused - "doesAppParamContain": doesAppParamContain, // FIXME unused - "filterServicesByLabelValue": filterServicesByLabelValue, // FIXME unused - - // Backend functions - "getWeight": getFuncServiceIntLabel(label.TraefikWeight, label.DefaultWeight), - "getProtocol": getFuncServiceStringLabel(label.TraefikProtocol, label.DefaultProtocol), - "getMaxConn": getMaxConn, - "getHealthCheck": getHealthCheck, - "getCircuitBreaker": getCircuitBreaker, - "getLoadBalancer": getLoadBalancer, - - // Frontend Functions - "getPriority": getFuncServiceIntLabel(label.TraefikFrontendPriority, label.DefaultFrontendPriority), - "getPassHostHeader": getFuncServiceBoolLabel(label.TraefikFrontendPassHostHeader, label.DefaultPassHostHeader), - "getPassTLSCert": getFuncBoolLabel(label.TraefikFrontendPassTLSCert, label.DefaultPassTLSCert), - "getEntryPoints": getFuncServiceSliceStringLabel(label.TraefikFrontendEntryPoints), - "getBasicAuth": getFuncServiceSliceStringLabel(label.TraefikFrontendAuthBasic), - "getFrontendRules": getFuncServiceLabelWithPrefix(label.TraefikFrontendRule), - "getWhiteList": getWhiteList, - "getHeaders": getHeaders, - "getRedirect": getRedirect, - 
- // SF Service Grouping - "getGroupedServices": getFuncServicesGroupedByLabel(traefikSFGroupName), - "getGroupedWeight": getFuncServiceStringLabel(traefikSFGroupWeight, "1"), - } - - templateObjects := struct { - Services []ServiceItemExtended - }{ - Services: services, - } - - return p.GetConfiguration(tmpl, sfFuncMap, templateObjects) -} - -func isPrimary(instance replicaInstance) bool { - _, data := instance.GetReplicaData() - return data.ReplicaRole == "Primary" -} - -func getBackendName(service ServiceItemExtended, partition PartitionItemExtended) string { - return provider.Normalize(service.Name + partition.PartitionInformation.ID) -} - -func getDefaultEndpoint(instance replicaInstance) string { - id, data := instance.GetReplicaData() - endpoint, err := getReplicaDefaultEndpoint(data) - if err != nil { - log.Warnf("No default endpoint for replica %s in service %s endpointData: %s", id, data.Address, err) - return "" - } - return endpoint -} - -func getNamedEndpoint(instance replicaInstance, endpointName string) string { - id, data := instance.GetReplicaData() - endpoint, err := getReplicaNamedEndpoint(data, endpointName) - if err != nil { - log.Warnf("No names endpoint of %s for replica %s in endpointData: %s. Error: %v", endpointName, id, data.Address, err) - return "" - } - return endpoint -} - -func getReplicaNamedEndpoint(replicaData *sf.ReplicaItemBase, endpointName string) (string, error) { - endpoints, err := decodeEndpointData(replicaData.Address) - if err != nil { - return "", err - } - - endpoint, exists := endpoints[endpointName] - if !exists { - return "", errors.New("endpoint doesn't exist") - } - return endpoint, nil -} - -func getApplicationParameter(app sf.ApplicationItem, key string) string { - for _, param := range app.Parameters { - if param.Key == key { - return param.Value - } - } - log.Errorf("Parameter %s doesn't exist in app %s", key, app.Name) - return "" -} - -func getServices(services []ServiceItemExtended, key string) map[string][]ServiceItemExtended { - result := map[string][]ServiceItemExtended{} - for _, service := range services { - if value, exists := service.Labels[key]; exists { - if matchingServices, hasKeyAlready := result[value]; hasKeyAlready { - result[value] = append(matchingServices, service) - } else { - result[value] = []ServiceItemExtended{service} - } - } - } - return result -} - -func doesAppParamContain(app sf.ApplicationItem, key, shouldContain string) bool { - value := getApplicationParameter(app, key) - return strings.Contains(value, shouldContain) -} - -func filterServicesByLabelValue(services []ServiceItemExtended, key, expectedValue string) []ServiceItemExtended { - var srvWithLabel []ServiceItemExtended - for _, service := range services { - value, exists := service.Labels[key] - if exists && value == expectedValue { - srvWithLabel = append(srvWithLabel, service) - } - } - return srvWithLabel -} - -func getHeaders(service ServiceItemExtended) *types.Headers { - return label.GetHeaders(service.Labels) -} - -func getWhiteList(service ServiceItemExtended) *types.WhiteList { - return label.GetWhiteList(service.Labels) -} - -func getRedirect(service ServiceItemExtended) *types.Redirect { - return label.GetRedirect(service.Labels) -} - -func getMaxConn(service ServiceItemExtended) *types.MaxConn { - return label.GetMaxConn(service.Labels) -} - -func getHealthCheck(service ServiceItemExtended) *types.HealthCheck { - return label.GetHealthCheck(service.Labels) -} - -func getCircuitBreaker(service ServiceItemExtended) 
*types.CircuitBreaker { - return label.GetCircuitBreaker(service.Labels) -} - -func getLoadBalancer(service ServiceItemExtended) *types.LoadBalancer { - return label.GetLoadBalancer(service.Labels) -} diff --git a/vendor/github.com/containous/traefik-extra-service-fabric/servicefabric_labelfuncs.go b/vendor/github.com/containous/traefik-extra-service-fabric/servicefabric_labelfuncs.go deleted file mode 100644 index 663118a08..000000000 --- a/vendor/github.com/containous/traefik-extra-service-fabric/servicefabric_labelfuncs.go +++ /dev/null @@ -1,75 +0,0 @@ -package servicefabric - -import ( - "strings" - - "github.com/containous/traefik/provider/label" -) - -// SF Specific Traefik Labels -const ( - traefikSFGroupName = "traefik.servicefabric.groupname" - traefikSFGroupWeight = "traefik.servicefabric.groupweight" - traefikSFEnableLabelOverrides = "traefik.servicefabric.enablelabeloverrides" - traefikSFEnableLabelOverridesDefault = true -) - -func getFuncBoolLabel(labelName string, defaultValue bool) func(service ServiceItemExtended) bool { - return func(service ServiceItemExtended) bool { - return label.GetBoolValue(service.Labels, labelName, defaultValue) - } -} - -func getServiceStringLabel(service ServiceItemExtended, labelName string, defaultValue string) string { - return label.GetStringValue(service.Labels, labelName, defaultValue) -} - -func getFuncServiceStringLabel(labelName string, defaultValue string) func(service ServiceItemExtended) string { - return func(service ServiceItemExtended) string { - return label.GetStringValue(service.Labels, labelName, defaultValue) - } -} - -func getFuncServiceIntLabel(labelName string, defaultValue int) func(service ServiceItemExtended) int { - return func(service ServiceItemExtended) int { - return label.GetIntValue(service.Labels, labelName, defaultValue) - } -} - -func getFuncServiceBoolLabel(labelName string, defaultValue bool) func(service ServiceItemExtended) bool { - return func(service ServiceItemExtended) bool { - return label.GetBoolValue(service.Labels, labelName, defaultValue) - } -} - -func getFuncServiceSliceStringLabel(labelName string) func(service ServiceItemExtended) []string { - return func(service ServiceItemExtended) []string { - return label.GetSliceStringValue(service.Labels, labelName) - } -} - -func hasService(service ServiceItemExtended, labelName string) bool { - return label.Has(service.Labels, labelName) -} - -func getFuncServiceLabelWithPrefix(labelName string) func(service ServiceItemExtended) map[string]string { - return func(service ServiceItemExtended) map[string]string { - return getServiceLabelsWithPrefix(service, labelName) - } -} - -func getFuncServicesGroupedByLabel(labelName string) func(services []ServiceItemExtended) map[string][]ServiceItemExtended { - return func(services []ServiceItemExtended) map[string][]ServiceItemExtended { - return getServices(services, labelName) - } -} - -func getServiceLabelsWithPrefix(service ServiceItemExtended, prefix string) map[string]string { - results := make(map[string]string) - for k, v := range service.Labels { - if strings.HasPrefix(k, prefix) { - results[k] = v - } - } - return results -} diff --git a/vendor/github.com/containous/traefik-extra-service-fabric/servicefabric_tmpl.go b/vendor/github.com/containous/traefik-extra-service-fabric/servicefabric_tmpl.go deleted file mode 100644 index 524a3289b..000000000 --- a/vendor/github.com/containous/traefik-extra-service-fabric/servicefabric_tmpl.go +++ /dev/null @@ -1,228 +0,0 @@ -package servicefabric - -const tmpl = 
` -[backends] -{{range $aggName, $aggServices := getGroupedServices .Services }} - [backends."{{ $aggName }}"] - {{range $service := $aggServices }} - {{range $partition := $service.Partitions }} - {{range $instance := $partition.Instances }} - [backends."{{ $aggName }}".servers."{{ $service.ID }}-{{ $instance.ID }}"] - url = "{{ getDefaultEndpoint $instance }}" - weight = {{ getGroupedWeight $service }} - {{end}} - {{end}} - {{end}} -{{end}} - -{{range $service := .Services }} - {{if isEnabled $service }} - {{range $partition := $service.Partitions }} - - {{if isStateless $service }} - - {{ $backendName := $service.Name }} - [backends."{{ $backendName }}"] - - {{ $circuitBreaker := getCircuitBreaker $service }} - {{if $circuitBreaker }} - [backends."{{ $backendName }}".circuitBreaker] - expression = "{{ $circuitBreaker.Expression }}" - {{end}} - - {{ $loadBalancer := getLoadBalancer $service }} - {{if $loadBalancer }} - [backends."{{ $backendName }}".loadBalancer] - method = "{{ $loadBalancer.Method }}" - sticky = {{ $loadBalancer.Sticky }} - {{if $loadBalancer.Stickiness }} - [backends."{{ $backendName }}".loadBalancer.stickiness] - cookieName = "{{ $loadBalancer.Stickiness.CookieName }}" - {{end}} - {{end}} - - {{ $maxConn := getMaxConn $service }} - {{if $maxConn }} - [backends."{{ $backendName }}".maxConn] - extractorFunc = "{{ $maxConn.ExtractorFunc }}" - amount = {{ $maxConn.Amount }} - {{end}} - - {{ $healthCheck := getHealthCheck $service }} - {{if $healthCheck }} - [backends."{{ $backendName }}".healthCheck] - path = "{{ $healthCheck.Path }}" - port = {{ $healthCheck.Port }} - interval = "{{ $healthCheck.Interval }}" - hostname = "{{ $healthCheck.Hostname }}" - {{if $healthCheck.Headers }} - [backends."{{ $backendName }}".healthCheck.headers] - {{range $k, $v := $healthCheck.Headers }} - {{$k}} = "{{$v}}" - {{end}} - {{end}} - {{end}} - - {{range $instance := $partition.Instances}} - [backends."{{ $service.Name }}".servers."{{ $instance.ID }}"] - url = "{{ getDefaultEndpoint $instance }}" - weight = {{ getWeight $service }} - {{end}} - - {{else if isStateful $service}} - - {{range $replica := $partition.Replicas}} - {{if isPrimary $replica}} - {{ $backendName := getBackendName $service $partition }} - [backends."{{ $backendName }}".servers."{{ $replica.ID }}"] - url = "{{ getDefaultEndpoint $replica }}" - weight = 1 - - [backends."{{$backendName}}".LoadBalancer] - method = "drr" - - {{end}} - {{end}} - - {{end}} - - {{end}} - {{end}} -{{end}} - -[frontends] -{{range $groupName, $groupServices := getGroupedServices .Services }} - {{ $service := index $groupServices 0 }} - [frontends."{{ $groupName }}"] - backend = "{{ $groupName }}" - priority = 50 - - {{range $key, $value := getFrontendRules $service }} - [frontends."{{ $groupName }}".routes."{{ $key }}"] - rule = "{{ $value }}" - {{end}} -{{end}} - -{{range $service := .Services }} - {{if isEnabled $service }} - {{ $frontendName := $service.Name }} - - {{if isStateless $service }} - - [frontends."frontend-{{ $frontendName }}"] - backend = "{{ $service.Name }}" - passHostHeader = {{ getPassHostHeader $service }} - passTLSCert = {{ getPassTLSCert $service }} - priority = {{ getPriority $service }} - - {{ $entryPoints := getEntryPoints $service }} - {{if $entryPoints }} - entryPoints = [{{range $entryPoints }} - "{{.}}", - {{end}}] - {{end}} - - {{ $basicAuth := getBasicAuth $service }} - {{if $basicAuth }} - basicAuth = [{{range $basicAuth }} - "{{.}}", - {{end}}] - {{end}} - - {{ $whitelist := getWhiteList $service }} - {{if 
$whitelist }} - [frontends."frontend-{{ $frontendName }}".whiteList] - sourceRange = [{{range $whitelist.SourceRange }} - "{{.}}", - {{end}}] - useXForwardedFor = {{ $whitelist.UseXForwardedFor }} - {{end}} - - {{ $redirect := getRedirect $service }} - {{if $redirect }} - [frontends."frontend-{{ $frontendName }}".redirect] - entryPoint = "{{ $redirect.EntryPoint }}" - regex = "{{ $redirect.Regex }}" - replacement = "{{ $redirect.Replacement }}" - permanent = {{ $redirect.Permanent }} - {{end}} - - {{ $headers := getHeaders $service }} - {{if $headers }} - [frontends."frontend-{{ $frontendName }}".headers] - SSLRedirect = {{ $headers.SSLRedirect }} - SSLTemporaryRedirect = {{ $headers.SSLTemporaryRedirect }} - SSLHost = "{{ $headers.SSLHost }}" - STSSeconds = {{ $headers.STSSeconds }} - STSIncludeSubdomains = {{ $headers.STSIncludeSubdomains }} - STSPreload = {{ $headers.STSPreload }} - ForceSTSHeader = {{ $headers.ForceSTSHeader }} - FrameDeny = {{ $headers.FrameDeny }} - CustomFrameOptionsValue = "{{ $headers.CustomFrameOptionsValue }}" - ContentTypeNosniff = {{ $headers.ContentTypeNosniff }} - BrowserXSSFilter = {{ $headers.BrowserXSSFilter }} - CustomBrowserXSSValue = "{{ $headers.CustomBrowserXSSValue }}" - ContentSecurityPolicy = "{{ $headers.ContentSecurityPolicy }}" - PublicKey = "{{ $headers.PublicKey }}" - ReferrerPolicy = "{{ $headers.ReferrerPolicy }}" - IsDevelopment = {{ $headers.IsDevelopment }} - - {{if $headers.AllowedHosts }} - AllowedHosts = [{{range $headers.AllowedHosts }} - "{{.}}", - {{end}}] - {{end}} - - {{if $headers.HostsProxyHeaders }} - HostsProxyHeaders = [{{range $headers.HostsProxyHeaders }} - "{{.}}", - {{end}}] - {{end}} - - {{if $headers.CustomRequestHeaders }} - [frontends."frontend-{{ $frontendName }}".headers.customRequestHeaders] - {{range $k, $v := $headers.CustomRequestHeaders }} - {{$k}} = "{{$v}}" - {{end}} - {{end}} - - {{if $headers.CustomResponseHeaders }} - [frontends."frontend-{{ $frontendName }}".headers.customResponseHeaders] - {{range $k, $v := $headers.CustomResponseHeaders }} - {{$k}} = "{{$v}}" - {{end}} - {{end}} - - {{if $headers.SSLProxyHeaders }} - [frontends."frontend-{{ $frontendName }}".headers.SSLProxyHeaders] - {{range $k, $v := $headers.SSLProxyHeaders }} - {{$k}} = "{{$v}}" - {{end}} - {{end}} - {{end}} - - {{range $key, $value := getFrontendRules $service }} - [frontends."frontend-{{ $frontendName }}".routes."{{ $key }}"] - rule = "{{ $value }}" - {{end}} - - {{else if isStateful $service }} - - {{range $partition := $service.Partitions }} - {{ $partitionId := $partition.PartitionInformation.ID }} - - {{ $rule := getLabelValue $service (print "traefik.frontend.rule.partition." 
$partitionId) "" }} - {{if $rule }} - [frontends."{{ $service.Name }}/{{ $partitionId }}"] - backend = "{{ getBackendName $service $partition }}" - - [frontends."{{ $service.Name }}/{{ $partitionId }}".routes.default] - rule = "{{ $rule }}" - {{end}} - {{end}} - - {{end}} - - {{end}} -{{end}} -` diff --git a/vendor/github.com/containous/traefik-extra-service-fabric/types.go b/vendor/github.com/containous/traefik-extra-service-fabric/types.go deleted file mode 100644 index beb1e8dbf..000000000 --- a/vendor/github.com/containous/traefik-extra-service-fabric/types.go +++ /dev/null @@ -1,42 +0,0 @@ -package servicefabric - -import ( - sf "github.com/jjcollinge/servicefabric" -) - -// ServiceItemExtended provides a flattened view -// of the service with details of the application -// it belongs too and the replicas/partitions -type ServiceItemExtended struct { - sf.ServiceItem - Application sf.ApplicationItem - Partitions []PartitionItemExtended - Labels map[string]string -} - -// PartitionItemExtended provides a flattened view -// of a services partitions -type PartitionItemExtended struct { - sf.PartitionItem - Replicas []sf.ReplicaItem - Instances []sf.InstanceItem -} - -// sfClient is an interface for Service Fabric client's to implement. -// This is purposely a subset of the total Service Fabric API surface. -type sfClient interface { - GetApplications() (*sf.ApplicationItemsPage, error) - GetServices(appName string) (*sf.ServiceItemsPage, error) - GetPartitions(appName, serviceName string) (*sf.PartitionItemsPage, error) - GetReplicas(appName, serviceName, partitionName string) (*sf.ReplicaItemsPage, error) - GetInstances(appName, serviceName, partitionName string) (*sf.InstanceItemsPage, error) - GetServiceExtensionMap(service *sf.ServiceItem, app *sf.ApplicationItem, extensionKey string) (map[string]string, error) - GetServiceLabels(service *sf.ServiceItem, app *sf.ApplicationItem, prefix string) (map[string]string, error) - GetProperties(name string) (bool, map[string]string, error) -} - -// replicaInstance interface provides a unified interface -// over replicas and instances -type replicaInstance interface { - GetReplicaData() (string, *sf.ReplicaItemBase) -} diff --git a/vendor/github.com/jjcollinge/logrus-appinsights/LICENSE b/vendor/github.com/jjcollinge/logrus-appinsights/LICENSE deleted file mode 100644 index 73897a54d..000000000 --- a/vendor/github.com/jjcollinge/logrus-appinsights/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2018 jjcollinge - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/jjcollinge/logrus-appinsights/config.go b/vendor/github.com/jjcollinge/logrus-appinsights/config.go deleted file mode 100644 index 5e297a559..000000000 --- a/vendor/github.com/jjcollinge/logrus-appinsights/config.go +++ /dev/null @@ -1,11 +0,0 @@ -package logrus_appinsights - -import "time" - -// Config for Application Insights settings -type Config struct { - InstrumentationKey string - EndpointUrl string - MaxBatchSize int - MaxBatchInterval time.Duration -} diff --git a/vendor/github.com/jjcollinge/logrus-appinsights/hook.go b/vendor/github.com/jjcollinge/logrus-appinsights/hook.go deleted file mode 100644 index f288124cd..000000000 --- a/vendor/github.com/jjcollinge/logrus-appinsights/hook.go +++ /dev/null @@ -1,173 +0,0 @@ -package logrus_appinsights - -import ( - "encoding/json" - "fmt" - - "github.com/Microsoft/ApplicationInsights-Go/appinsights" - "github.com/sirupsen/logrus" -) - -var defaultLevels = []logrus.Level{ - logrus.PanicLevel, - logrus.FatalLevel, - logrus.ErrorLevel, - logrus.WarnLevel, - logrus.InfoLevel, -} - -var levelMap = map[logrus.Level]appinsights.SeverityLevel{ - logrus.PanicLevel: appinsights.Critical, - logrus.FatalLevel: appinsights.Critical, - logrus.ErrorLevel: appinsights.Error, - logrus.WarnLevel: appinsights.Warning, - logrus.InfoLevel: appinsights.Information, -} - -// AppInsightsHook is a logrus hook for Application Insights -type AppInsightsHook struct { - client appinsights.TelemetryClient - - async bool - levels []logrus.Level - ignoreFields map[string]struct{} - filters map[string]func(interface{}) interface{} -} - -// New returns an initialised logrus hook for Application Insights -func New(name string, conf Config) (*AppInsightsHook, error) { - if conf.InstrumentationKey == "" { - return nil, fmt.Errorf("InstrumentationKey is required and missing from configuration") - } - telemetryConf := appinsights.NewTelemetryConfiguration(conf.InstrumentationKey) - if conf.MaxBatchSize != 0 { - telemetryConf.MaxBatchSize = conf.MaxBatchSize - } - if conf.MaxBatchInterval != 0 { - telemetryConf.MaxBatchInterval = conf.MaxBatchInterval - } - if conf.EndpointUrl != "" { - telemetryConf.EndpointUrl = conf.EndpointUrl - } - telemetryClient := appinsights.NewTelemetryClientFromConfig(telemetryConf) - if name != "" { - telemetryClient.Context().Cloud().SetRoleName(name) - } - return &AppInsightsHook{ - client: telemetryClient, - levels: defaultLevels, - ignoreFields: make(map[string]struct{}), - filters: make(map[string]func(interface{}) interface{}), - }, nil -} - -// NewWithAppInsightsConfig returns an initialised logrus hook for Application Insights -func NewWithAppInsightsConfig(name string, conf *appinsights.TelemetryConfiguration) (*AppInsightsHook, error) { - if conf == nil { - return nil, fmt.Errorf("Nil configuration provided") - } - if conf.InstrumentationKey == "" { - return nil, fmt.Errorf("InstrumentationKey is required in configuration") - } - telemetryClient := appinsights.NewTelemetryClientFromConfig(conf) - if name != "" { - telemetryClient.Context().Cloud().SetRoleName(name) - } - return &AppInsightsHook{ - client: telemetryClient, - levels: defaultLevels, - ignoreFields: make(map[string]struct{}), - filters: make(map[string]func(interface{}) interface{}), - 
}, nil -} - -// Levels returns logging level to fire this hook. -func (hook *AppInsightsHook) Levels() []logrus.Level { - return hook.levels -} - -// SetLevels sets logging level to fire this hook. -func (hook *AppInsightsHook) SetLevels(levels []logrus.Level) { - hook.levels = levels -} - -// SetAsync sets async flag for sending logs asynchronously. -// If use this true, Fire() does not return error. -func (hook *AppInsightsHook) SetAsync(async bool) { - hook.async = async -} - -// AddIgnore adds field name to ignore. -func (hook *AppInsightsHook) AddIgnore(name string) { - hook.ignoreFields[name] = struct{}{} -} - -// AddFilter adds a custom filter function. -func (hook *AppInsightsHook) AddFilter(name string, fn func(interface{}) interface{}) { - hook.filters[name] = fn -} - -// Fire is invoked by logrus and sends log data to Application Insights. -func (hook *AppInsightsHook) Fire(entry *logrus.Entry) error { - if !hook.async { - return hook.fire(entry) - } - // async - fire and forget - go hook.fire(entry) - return nil -} - -func (hook *AppInsightsHook) fire(entry *logrus.Entry) error { - trace, err := hook.buildTrace(entry) - if err != nil { - return err - } - hook.client.TrackTraceTelemetry(trace) - return nil -} - -func (hook *AppInsightsHook) buildTrace(entry *logrus.Entry) (*appinsights.TraceTelemetry, error) { - // Add the message as a field if it isn't already - if _, ok := entry.Data["message"]; !ok { - entry.Data["message"] = entry.Message - } - - level := levelMap[entry.Level] - trace := appinsights.NewTraceTelemetry(entry.Message, level) - if trace == nil { - return nil, fmt.Errorf("Could not create telemetry trace with entry %+v", entry) - } - for k, v := range entry.Data { - if _, ok := hook.ignoreFields[k]; ok { - continue - } - if fn, ok := hook.filters[k]; ok { - v = fn(v) // apply custom filter - } else { - v = formatData(v) // use default formatter - } - vStr := fmt.Sprintf("%v", v) - trace.SetProperty(k, vStr) - } - trace.SetProperty("source_level", entry.Level.String()) - trace.SetProperty("source_timestamp", entry.Time.String()) - return trace, nil -} - -// formatData returns value as a suitable format. -func formatData(value interface{}) (formatted interface{}) { - switch value := value.(type) { - case json.Marshaler: - return value - case error: - return value.Error() - case fmt.Stringer: - return value.String() - default: - return value - } -} - -func stringPtr(str string) *string { - return &str -} diff --git a/vendor/github.com/jjcollinge/servicefabric/LICENSE b/vendor/github.com/jjcollinge/servicefabric/LICENSE deleted file mode 100644 index 699b19cf4..000000000 --- a/vendor/github.com/jjcollinge/servicefabric/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2017 Joni Collinge - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/jjcollinge/servicefabric/query.go b/vendor/github.com/jjcollinge/servicefabric/query.go deleted file mode 100644 index 1310a378b..000000000 --- a/vendor/github.com/jjcollinge/servicefabric/query.go +++ /dev/null @@ -1,20 +0,0 @@ -package servicefabric - -type queryParamsFunc func(params []string) []string - -func withContinue(token string) queryParamsFunc { - if len(token) == 0 { - return noOp - } - return withParam("continue", token) -} - -func withParam(name, value string) queryParamsFunc { - return func(params []string) []string { - return append(params, name+"="+value) - } -} - -func noOp(params []string) []string { - return params -} diff --git a/vendor/github.com/jjcollinge/servicefabric/servicefabric.go b/vendor/github.com/jjcollinge/servicefabric/servicefabric.go deleted file mode 100644 index 838da6c6f..000000000 --- a/vendor/github.com/jjcollinge/servicefabric/servicefabric.go +++ /dev/null @@ -1,392 +0,0 @@ -// Package servicefabric is an opinionated Service Fabric client written in Golang -package servicefabric - -import ( - "crypto/tls" - "encoding/json" - "encoding/xml" - "errors" - "fmt" - "io/ioutil" - "net/http" - "strings" -) - -// DefaultAPIVersion is a default Service Fabric REST API version -const DefaultAPIVersion = "3.0" - -// Client for Service Fabric. -// This is purposely a subset of the total Service Fabric API surface. -type Client struct { - // endpoint Service Fabric cluster management endpoint - endpoint string - // apiVersion Service Fabric API version - apiVersion string - // httpClient HTTP client - httpClient *http.Client -} - -// NewClient returns a new provider client that can query the -// Service Fabric management API externally or internally -func NewClient(httpClient *http.Client, endpoint, apiVersion string, tlsConfig *tls.Config) (*Client, error) { - if endpoint == "" { - return nil, errors.New("endpoint missing for httpClient configuration") - } - if apiVersion == "" { - apiVersion = DefaultAPIVersion - } - - if tlsConfig != nil { - tlsConfig.Renegotiation = tls.RenegotiateFreelyAsClient - tlsConfig.BuildNameToCertificate() - httpClient.Transport = &http.Transport{TLSClientConfig: tlsConfig} - } - - return &Client{ - endpoint: endpoint, - apiVersion: apiVersion, - httpClient: httpClient, - }, nil -} - -// GetApplications returns all the registered applications -// within the Service Fabric cluster. -func (c Client) GetApplications() (*ApplicationItemsPage, error) { - var aggregateAppItemsPages ApplicationItemsPage - var continueToken string - for { - res, err := c.getHTTP("Applications/", withContinue(continueToken)) - if err != nil { - return nil, err - } - - var appItemsPage ApplicationItemsPage - err = json.Unmarshal(res, &appItemsPage) - if err != nil { - return nil, fmt.Errorf("could not deserialise JSON response: %+v", err) - } - - aggregateAppItemsPages.Items = append(aggregateAppItemsPages.Items, appItemsPage.Items...) 
- - continueToken = getString(appItemsPage.ContinuationToken) - if continueToken == "" { - break - } - } - return &aggregateAppItemsPages, nil -} - -// GetServices returns all the services associated -// with a Service Fabric application. -func (c Client) GetServices(appName string) (*ServiceItemsPage, error) { - var aggregateServiceItemsPages ServiceItemsPage - var continueToken string - for { - res, err := c.getHTTP("Applications/"+appName+"/$/GetServices", withContinue(continueToken)) - if err != nil { - return nil, err - } - - var servicesItemsPage ServiceItemsPage - err = json.Unmarshal(res, &servicesItemsPage) - if err != nil { - return nil, fmt.Errorf("could not deserialise JSON response: %+v", err) - } - - aggregateServiceItemsPages.Items = append(aggregateServiceItemsPages.Items, servicesItemsPage.Items...) - - continueToken = getString(servicesItemsPage.ContinuationToken) - if continueToken == "" { - break - } - } - return &aggregateServiceItemsPages, nil -} - -// GetPartitions returns all the partitions associated -// with a Service Fabric service. -func (c Client) GetPartitions(appName, serviceName string) (*PartitionItemsPage, error) { - var aggregatePartitionItemsPages PartitionItemsPage - var continueToken string - for { - basePath := "Applications/" + appName + "/$/GetServices/" + serviceName + "/$/GetPartitions/" - res, err := c.getHTTP(basePath, withContinue(continueToken)) - if err != nil { - return nil, err - } - - var partitionsItemsPage PartitionItemsPage - err = json.Unmarshal(res, &partitionsItemsPage) - if err != nil { - return nil, fmt.Errorf("could not deserialise JSON response: %+v", err) - } - - aggregatePartitionItemsPages.Items = append(aggregatePartitionItemsPages.Items, partitionsItemsPage.Items...) - - continueToken = getString(partitionsItemsPage.ContinuationToken) - if continueToken == "" { - break - } - } - return &aggregatePartitionItemsPages, nil -} - -// GetInstances returns all the instances associated -// with a stateless Service Fabric partition. -func (c Client) GetInstances(appName, serviceName, partitionName string) (*InstanceItemsPage, error) { - var aggregateInstanceItemsPages InstanceItemsPage - var continueToken string - for { - basePath := "Applications/" + appName + "/$/GetServices/" + serviceName + "/$/GetPartitions/" + partitionName + "/$/GetReplicas" - res, err := c.getHTTP(basePath, withContinue(continueToken)) - if err != nil { - return nil, err - } - - var instanceItemsPage InstanceItemsPage - err = json.Unmarshal(res, &instanceItemsPage) - if err != nil { - return nil, fmt.Errorf("could not deserialise JSON response: %+v", err) - } - - aggregateInstanceItemsPages.Items = append(aggregateInstanceItemsPages.Items, instanceItemsPage.Items...) - - continueToken = getString(instanceItemsPage.ContinuationToken) - if continueToken == "" { - break - } - } - return &aggregateInstanceItemsPages, nil -} - -// GetReplicas returns all the replicas associated -// with a stateful Service Fabric partition. 
-func (c Client) GetReplicas(appName, serviceName, partitionName string) (*ReplicaItemsPage, error) { - var aggregateReplicaItemsPages ReplicaItemsPage - var continueToken string - for { - basePath := "Applications/" + appName + "/$/GetServices/" + serviceName + "/$/GetPartitions/" + partitionName + "/$/GetReplicas" - res, err := c.getHTTP(basePath, withContinue(continueToken)) - if err != nil { - return nil, err - } - - var replicasItemsPage ReplicaItemsPage - err = json.Unmarshal(res, &replicasItemsPage) - if err != nil { - return nil, fmt.Errorf("could not deserialise JSON response: %+v", err) - } - - aggregateReplicaItemsPages.Items = append(aggregateReplicaItemsPages.Items, replicasItemsPage.Items...) - - continueToken = getString(replicasItemsPage.ContinuationToken) - if continueToken == "" { - break - } - } - return &aggregateReplicaItemsPages, nil -} - -// GetServiceExtension returns all the extensions specified -// in a Service's manifest file. If the XML schema does not -// map to the provided interface, the default type interface will -// be returned. -func (c Client) GetServiceExtension(appType, applicationVersion, serviceTypeName, extensionKey string, response interface{}) error { - res, err := c.getHTTP("ApplicationTypes/"+appType+"/$/GetServiceTypes", withParam("ApplicationTypeVersion", applicationVersion)) - if err != nil { - return fmt.Errorf("error requesting service extensions: %v", err) - } - - var serviceTypes []ServiceType - err = json.Unmarshal(res, &serviceTypes) - if err != nil { - return fmt.Errorf("could not deserialise JSON response: %+v", err) - } - - for _, serviceTypeInfo := range serviceTypes { - if serviceTypeInfo.ServiceTypeDescription.ServiceTypeName == serviceTypeName { - for _, extension := range serviceTypeInfo.ServiceTypeDescription.Extensions { - if strings.EqualFold(extension.Key, extensionKey) { - err = xml.Unmarshal([]byte(extension.Value), &response) - if err != nil { - return fmt.Errorf("could not deserialise extension's XML value: %+v", err) - } - return nil - } - } - } - } - return nil -} - -// GetServiceExtensionMap returns all the extension xml specified -// in a Service's manifest file into (which must conform to ServiceExtensionLabels) -// a map[string]string -func (c Client) GetServiceExtensionMap(service *ServiceItem, app *ApplicationItem, extensionKey string) (map[string]string, error) { - extensionData := ServiceExtensionLabels{} - err := c.GetServiceExtension(app.TypeName, app.TypeVersion, service.TypeName, extensionKey, &extensionData) - if err != nil { - return nil, err - } - - labels := map[string]string{} - if extensionData.Label != nil { - for _, label := range extensionData.Label { - labels[label.Key] = label.Value - } - } - - return labels, nil -} - -// GetProperties uses the Property Manager API to retrieve -// string properties from a name as a dictionary -// Property name is the path to the properties you would like to list. 
-// for example a serviceID -func (c Client) GetProperties(name string) (bool, map[string]string, error) { - nameExists, err := c.nameExists(name) - if err != nil { - return false, nil, err - } - - if !nameExists { - return false, nil, nil - } - - properties := make(map[string]string) - - var continueToken string - for { - res, err := c.getHTTP("Names/"+name+"/$/GetProperties", withContinue(continueToken), withParam("IncludeValues", "true")) - if err != nil { - return false, nil, err - } - - var propertiesListPage PropertiesListPage - err = json.Unmarshal(res, &propertiesListPage) - if err != nil { - return false, nil, fmt.Errorf("could not deserialise JSON response: %+v", err) - } - - for _, property := range propertiesListPage.Properties { - if property.Value.Kind != "String" { - continue - } - properties[property.Name] = property.Value.Data - } - - continueToken = propertiesListPage.ContinuationToken - if continueToken == "" { - break - } - } - - return true, properties, nil -} - -// GetServiceLabels add labels from service manifest extensions and properties manager -// expects extension xml in -// -// Deprecated: Use GetProperties and GetServiceExtensionMap instead. -func (c Client) GetServiceLabels(service *ServiceItem, app *ApplicationItem, prefix string) (map[string]string, error) { - extensionData := ServiceExtensionLabels{} - err := c.GetServiceExtension(app.TypeName, app.TypeVersion, service.TypeName, prefix, &extensionData) - if err != nil { - return nil, err - } - - prefixPeriod := prefix + "." - - labels := map[string]string{} - if extensionData.Label != nil { - for _, label := range extensionData.Label { - if strings.HasPrefix(label.Key, prefixPeriod) { - labelKey := strings.Replace(label.Key, prefixPeriod, "", -1) - labels[labelKey] = label.Value - } - } - } - - exists, properties, err := c.GetProperties(service.ID) - if err != nil { - return nil, err - } - - if exists { - for k, v := range properties { - if strings.HasPrefix(k, prefixPeriod) { - labelKey := strings.Replace(k, prefixPeriod, "", -1) - labels[labelKey] = v - } - } - } - - return labels, nil -} - -func (c Client) nameExists(propertyName string) (bool, error) { - res, err := c.getHTTPRaw("Names/" + propertyName) - // Get http will return error for any non 200 response code. - if err != nil { - return false, err - } - - return res.StatusCode == http.StatusOK, nil -} - -func (c Client) getHTTP(basePath string, paramsFuncs ...queryParamsFunc) ([]byte, error) { - if c.httpClient == nil { - return nil, errors.New("invalid http client provided") - } - - url := c.getURL(basePath, paramsFuncs...) 
- res, err := c.httpClient.Get(url) - if err != nil { - return nil, fmt.Errorf("failed to connect to Service Fabric server %+v on %s", err, url) - } - if res.StatusCode != http.StatusOK { - return nil, fmt.Errorf("Service Fabric responded with error code %s to request %s with body %v", res.Status, url, res.Body) - } - - if res.Body == nil { - return nil, errors.New("empty response body from Service Fabric") - } - defer res.Body.Close() - - body, readErr := ioutil.ReadAll(res.Body) - if readErr != nil { - return nil, fmt.Errorf("failed to read response body from Service Fabric response %+v", readErr) - } - return body, nil -} - -func (c Client) getHTTPRaw(basePath string) (*http.Response, error) { - if c.httpClient == nil { - return nil, fmt.Errorf("invalid http client provided") - } - - url := c.getURL(basePath) - - res, err := c.httpClient.Get(url) - if err != nil { - return nil, fmt.Errorf("failed to connect to Service Fabric server %+v on %s", err, url) - } - return res, nil -} - -func (c Client) getURL(basePath string, paramsFuncs ...queryParamsFunc) string { - params := []string{"api-version=" + c.apiVersion} - - for _, paramsFunc := range paramsFuncs { - params = paramsFunc(params) - } - - return fmt.Sprintf("%s/%s?%s", c.endpoint, basePath, strings.Join(params, "&")) -} - -func getString(str *string) string { - if str == nil { - return "" - } - return *str -} diff --git a/vendor/github.com/jjcollinge/servicefabric/types.go b/vendor/github.com/jjcollinge/servicefabric/types.go deleted file mode 100644 index 2ec150c1f..000000000 --- a/vendor/github.com/jjcollinge/servicefabric/types.go +++ /dev/null @@ -1,199 +0,0 @@ -package servicefabric - -import "encoding/xml" - -// ApplicationItemsPage encapsulates the paged response -// model for Applications in the Service Fabric API -type ApplicationItemsPage struct { - ContinuationToken *string `json:"ContinuationToken"` - Items []ApplicationItem `json:"Items"` -} - -// AppParameter Application parameter -type AppParameter struct { - Key string `json:"Key"` - Value string `json:"Value"` -} - -// ApplicationItem encapsulates the embedded model for -// ApplicationItems within the ApplicationItemsPage model -type ApplicationItem struct { - HealthState string `json:"HealthState"` - ID string `json:"Id"` - Name string `json:"Name"` - Parameters []*AppParameter `json:"Parameters"` - Status string `json:"Status"` - TypeName string `json:"TypeName"` - TypeVersion string `json:"TypeVersion"` -} - -// ServiceItemsPage encapsulates the paged response -// model for Services in the Service Fabric API -type ServiceItemsPage struct { - ContinuationToken *string `json:"ContinuationToken"` - Items []ServiceItem `json:"Items"` -} - -// ServiceItem encapsulates the embedded model for -// ServiceItems within the ServiceItemsPage model -type ServiceItem struct { - HasPersistedState bool `json:"HasPersistedState"` - HealthState string `json:"HealthState"` - ID string `json:"Id"` - IsServiceGroup bool `json:"IsServiceGroup"` - ManifestVersion string `json:"ManifestVersion"` - Name string `json:"Name"` - ServiceKind string `json:"ServiceKind"` - ServiceStatus string `json:"ServiceStatus"` - TypeName string `json:"TypeName"` -} - -// PartitionItemsPage encapsulates the paged response -// model for PartitionItems in the Service Fabric API -type PartitionItemsPage struct { - ContinuationToken *string `json:"ContinuationToken"` - Items []PartitionItem `json:"Items"` -} - -// PartitionItem encapsulates the service information -// returned for each PartitionItem under the 
service -type PartitionItem struct { - CurrentConfigurationEpoch ConfigurationEpoch `json:"CurrentConfigurationEpoch"` - HealthState string `json:"HealthState"` - MinReplicaSetSize int64 `json:"MinReplicaSetSize"` - PartitionInformation PartitionInformation `json:"PartitionInformation"` - PartitionStatus string `json:"PartitionStatus"` - ServiceKind string `json:"ServiceKind"` - TargetReplicaSetSize int64 `json:"TargetReplicaSetSize"` -} - -// ConfigurationEpoch Partition configuration epoch -type ConfigurationEpoch struct { - ConfigurationVersion string `json:"ConfigurationVersion"` - DataLossVersion string `json:"DataLossVersion"` -} - -// PartitionInformation Partition information -type PartitionInformation struct { - HighKey string `json:"HighKey"` - ID string `json:"Id"` - LowKey string `json:"LowKey"` - ServicePartitionKind string `json:"ServicePartitionKind"` -} - -// ReplicaItemBase shared data used -// in both replicas and instances -type ReplicaItemBase struct { - Address string `json:"Address"` - HealthState string `json:"HealthState"` - LastInBuildDurationInSeconds string `json:"LastInBuildDurationInSeconds"` - NodeName string `json:"NodeName"` - ReplicaRole string `json:"ReplicaRole"` - ReplicaStatus string `json:"ReplicaStatus"` - ServiceKind string `json:"ServiceKind"` -} - -// ReplicaItemsPage encapsulates the response -// model for Replicas in the Service Fabric API -type ReplicaItemsPage struct { - ContinuationToken *string `json:"ContinuationToken"` - Items []ReplicaItem `json:"Items"` -} - -// ReplicaItem holds replica specific data -type ReplicaItem struct { - *ReplicaItemBase - ID string `json:"ReplicaId"` -} - -// GetReplicaData returns replica data -func (m *ReplicaItem) GetReplicaData() (string, *ReplicaItemBase) { - return m.ID, m.ReplicaItemBase -} - -// InstanceItemsPage encapsulates the response -// model for Instances in the Service Fabric API -type InstanceItemsPage struct { - ContinuationToken *string `json:"ContinuationToken"` - Items []InstanceItem `json:"Items"` -} - -// InstanceItem hold instance specific data -type InstanceItem struct { - *ReplicaItemBase - ID string `json:"InstanceId"` -} - -// GetReplicaData returns replica data from an instance -func (m *InstanceItem) GetReplicaData() (string, *ReplicaItemBase) { - return m.ID, m.ReplicaItemBase -} - -// ServiceType encapsulates the response model for -// Service types in the Service Fabric API -type ServiceType struct { - ServiceTypeDescription ServiceTypeDescription `json:"ServiceTypeDescription"` - ServiceManifestVersion string `json:"ServiceManifestVersion"` - ServiceManifestName string `json:"ServiceManifestName"` - IsServiceGroup bool `json:"IsServiceGroup"` -} - -// ServiceTypeDescription Service Type Description -type ServiceTypeDescription struct { - IsStateful bool `json:"IsStateful"` - ServiceTypeName string `json:"ServiceTypeName"` - PlacementConstraints string `json:"PlacementConstraints"` - HasPersistedState bool `json:"HasPersistedState"` - Kind string `json:"Kind"` - Extensions []KeyValuePair `json:"Extensions"` - LoadMetrics []interface{} `json:"LoadMetrics"` - ServicePlacementPolicies []interface{} `json:"ServicePlacementPolicies"` -} - -// PropertiesListPage encapsulates the response model for -// PagedPropertyInfoList in the Service Fabric API -type PropertiesListPage struct { - ContinuationToken string `json:"ContinuationToken"` - IsConsistent bool `json:"IsConsistent"` - Properties []Property `json:"Properties"` -} - -// Property Paged Property Info -type Property struct { - 
Metadata Metadata `json:"Metadata"` - Name string `json:"Name"` - Value PropValue `json:"Value"` -} - -// Metadata Property Metadata -type Metadata struct { - CustomTypeID string `json:"CustomTypeId"` - LastModifiedUtcTimestamp string `json:"LastModifiedUtcTimestamp"` - Parent string `json:"Parent"` - SequenceNumber string `json:"SequenceNumber"` - SizeInBytes int64 `json:"SizeInBytes"` - TypeID string `json:"TypeId"` -} - -// PropValue Property value -type PropValue struct { - Data string `json:"Data"` - Kind string `json:"Kind"` -} - -// KeyValuePair represents a key value pair structure -type KeyValuePair struct { - Key string `json:"Key"` - Value string `json:"Value"` -} - -// ServiceExtensionLabels provides the structure for -// deserialising the XML document used to store labels in an Extension -type ServiceExtensionLabels struct { - XMLName xml.Name `xml:"Labels"` - Label []struct { - XMLName xml.Name `xml:"Label"` - Value string `xml:",chardata"` - Key string `xml:"Key,attr"` - } -} diff --git a/version/version.go b/version/version.go index 77b35b612..a9dbba455 100644 --- a/version/version.go +++ b/version/version.go @@ -30,8 +30,8 @@ var ( }) ) -// AddRoutes add version routes on a router -func (v Handler) AddRoutes(router *mux.Router) { +// Append adds version routes on a router. +func (v Handler) Append(router *mux.Router) { router.Methods(http.MethodGet).Path("/api/version"). HandlerFunc(func(response http.ResponseWriter, request *http.Request) { v := struct {