Merge branch 'v1.6' into mrg-current-v1.6

This commit is contained in:
Fernandez Ludovic 2018-04-12 14:36:36 +02:00
commit de3aeb9732
310 changed files with 28693 additions and 12148 deletions

3
.gitattributes vendored
View file

@ -1,2 +1 @@
vendor/github.com/xenolf/lego/providers/dns/cloudxns/cloudxns.go eol=crlf
# vendor/github.com/xenolf/lego/providers/dns/cloudxns/cloudxns.go eol=crlf

View file

@ -46,6 +46,10 @@ HOW TO WRITE A GOOD ISSUE?
For the Traefik Docker image:
docker run [IMAGE] version
ex: docker run traefik version
For the alpine Traefik Docker image:
docker run [IMAGE] traefik version
ex: docker run traefik traefik version
-->
```

View file

@ -44,6 +44,10 @@ HOW TO WRITE A GOOD ISSUE?
For the Traefik Docker image:
docker run [IMAGE] version
ex: docker run traefik version
For the alpine Traefik Docker image:
docker run [IMAGE] traefik version
ex: docker run traefik traefik version
-->
```

View file

@ -1,5 +1,31 @@
# Change Log
## [v1.6.0-rc5](https://github.com/containous/traefik/tree/v1.6.0-rc5) (2018-04-12)
[All Commits](https://github.com/containous/traefik/compare/v1.6.0-rc4...v1.6.0-rc5)
**Enhancements:**
- **[acme]** Generate wildcard certificate with SANs in ACME ([#3167](https://github.com/containous/traefik/pull/3167) by [nmengin](https://github.com/nmengin))
- **[ecs]** Factorize labels managements. ([#3159](https://github.com/containous/traefik/pull/3159) by [ldez](https://github.com/ldez))
**Bug fixes:**
- **[acme]** Update lego. ([#3158](https://github.com/containous/traefik/pull/3158) by [ldez](https://github.com/ldez))
- **[acme]** Fix acme.json file automatic creation ([#3156](https://github.com/containous/traefik/pull/3156) by [nmengin](https://github.com/nmengin))
- **[acme]** Minor updates to dumpcerts.sh ([#3116](https://github.com/containous/traefik/pull/3116) by [mathuin](https://github.com/mathuin))
- **[acme]** Add TTL and custom Timeout in DigitalOcean DNS provider ([#3143](https://github.com/containous/traefik/pull/3143) by [ldez](https://github.com/ldez))
- **[acme]** Add ACME certificates only on ACME EntryPoint ([#3136](https://github.com/containous/traefik/pull/3136) by [nmengin](https://github.com/nmengin))
- **[consul,docker,ecs,eureka,k8s,kv,marathon,mesos,rancher]** Server weight zero ([#3130](https://github.com/containous/traefik/pull/3130) by [ldez](https://github.com/ldez))
- **[k8s]** Limit label selector to Ingress factory. ([#3137](https://github.com/containous/traefik/pull/3137) by [timoreimann](https://github.com/timoreimann))
- **[middleware,consul,consulcatalog,docker,ecs,kv,marathon,mesos,rancher]** Fix: error pages ([#3138](https://github.com/containous/traefik/pull/3138) by [ldez](https://github.com/ldez))
- **[webui]** Remove useless ACME tab from UI. ([#3154](https://github.com/containous/traefik/pull/3154) by [ldez](https://github.com/ldez))
**Documentation:**
- **[k8s]** Update kubernetes.md ([#3171](https://github.com/containous/traefik/pull/3171) by [andreyfedoseev](https://github.com/andreyfedoseev))
- Update some examples ([#3150](https://github.com/containous/traefik/pull/3150) by [zaporylie](https://github.com/zaporylie))
- Normalize parameter names in configs ([#3132](https://github.com/containous/traefik/pull/3132) by [kachkaev](https://github.com/kachkaev))
**Misc:**
- **[oxy]** Disable closeNotify when method GET for http pipelining ([#3108](https://github.com/containous/traefik/pull/3108) by [Juliens](https://github.com/Juliens))
## [v1.6.0-rc4](https://github.com/containous/traefik/tree/v1.6.0-rc4) (2018-04-04)
[All Commits](https://github.com/containous/traefik/compare/v1.6.0-rc3...v1.6.0-rc4)

77
Gopkg.lock generated
View file

@ -24,8 +24,12 @@
[[projects]]
name = "github.com/Azure/azure-sdk-for-go"
packages = ["arm/dns"]
revision = "f7bb4db3ea4c73dc58bd284c38ea644a79324be0"
packages = [
"services/dns/mgmt/2017-09-01/dns",
"version"
]
revision = "068ec4d616be5b2175509bf1fb3e4c8ea160d5c8"
version = "v15.0.1"
[[projects]]
branch = "master"
@ -45,8 +49,8 @@
"autorest/date",
"autorest/to"
]
revision = "f6be1abbb5abd0517522f850dd785990d373da7e"
version = "v9.0.0"
revision = "9ad9326b278af8fa5cc67c30c0ce9a58cc0862b2"
version = "v10.6.0"
[[projects]]
branch = "master"
@ -140,6 +144,17 @@
]
revision = "063d875e3c5fd734fa2aa12fac83829f62acfc70"
[[projects]]
name = "github.com/akamai/AkamaiOPEN-edgegrid-golang"
packages = [
"client-v1",
"configdns-v1",
"edgegrid",
"jsonhooks-v1"
]
revision = "a494eba1efa1f38338393727dff63389a6a66534"
version = "v0.6.0"
[[projects]]
name = "github.com/aokoli/goutils"
packages = ["."]
@ -193,6 +208,7 @@
"service/dynamodb/dynamodbiface",
"service/ec2",
"service/ecs",
"service/lightsail",
"service/route53",
"service/sts"
]
@ -247,8 +263,8 @@
[[projects]]
name = "github.com/containous/traefik-extra-service-fabric"
packages = ["."]
revision = "29a6d70ad0f15175efbaa5fd93d8afdd8b373b93"
version = "v1.1.1"
revision = "503022efdc178146d598911092af75690510a80c"
version = "v1.1.3"
[[projects]]
name = "github.com/coreos/bbolt"
@ -300,8 +316,8 @@
[[projects]]
name = "github.com/dgrijalva/jwt-go"
packages = ["."]
revision = "d2709f9f1f31ebcda9651b03077758c1f3a0018c"
version = "v3.0.0"
revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e"
version = "v3.2.0"
[[projects]]
name = "github.com/dnsimple/dnsimple-go"
@ -914,6 +930,12 @@
packages = ["."]
revision = "db96455566f05ffe42bd6ac671f05eeb1152b45d"
[[projects]]
branch = "master"
name = "github.com/namedotcom/go"
packages = ["namecom"]
revision = "08470befbe04613bd4b44cb6978b05d50294c4d4"
[[projects]]
branch = "master"
name = "github.com/ogier/pflag"
@ -1120,6 +1142,12 @@
]
revision = "37e84520dcf74488f67654f9c775b9752c232dc1"
[[projects]]
branch = "master"
name = "github.com/tuvistavie/securerandom"
packages = ["."]
revision = "15512123a948d62f6361bd84818e11f2ad84059a"
[[projects]]
name = "github.com/tv42/zbase32"
packages = ["."]
@ -1188,7 +1216,7 @@
"roundrobin",
"utils"
]
revision = "dacf34285ce530b272e9fe04d2f45f52e6374e36"
revision = "6956548a7fa4272adeadf828455109c53933ea86"
[[projects]]
name = "github.com/vulcand/predicate"
@ -1214,28 +1242,34 @@
revision = "0c8571ac0ce161a5feb57375a9cdf148c98c0f70"
[[projects]]
branch = "acmev2"
branch = "containous-fork"
name = "github.com/xenolf/lego"
packages = [
"acme",
"acmev2",
"providers/dns",
"providers/dns/auroradns",
"providers/dns/azure",
"providers/dns/bluecat",
"providers/dns/cloudflare",
"providers/dns/cloudxns",
"providers/dns/digitalocean",
"providers/dns/dnsimple",
"providers/dns/dnsmadeeasy",
"providers/dns/dnspod",
"providers/dns/duckdns",
"providers/dns/dyn",
"providers/dns/exec",
"providers/dns/exoscale",
"providers/dns/fastdns",
"providers/dns/gandi",
"providers/dns/gandiv5",
"providers/dns/glesys",
"providers/dns/godaddy",
"providers/dns/googlecloud",
"providers/dns/lightsail",
"providers/dns/linode",
"providers/dns/namecheap",
"providers/dns/namedotcom",
"providers/dns/ns1",
"providers/dns/otc",
"providers/dns/ovh",
@ -1245,7 +1279,8 @@
"providers/dns/route53",
"providers/dns/vultr"
]
revision = "a149e7d6506feb4003da7093cbf818c6b75ed4a4"
revision = "2817d2131186742bc98830c73a5d9c255b3f4537"
source = "github.com/containous/lego"
[[projects]]
branch = "master"
@ -1403,6 +1438,12 @@
revision = "5b3e00af70a9484542169a976dcab8d03e601a17"
version = "v1.30.0"
[[projects]]
branch = "v1"
name = "gopkg.in/mattes/go-expand-tilde.v1"
packages = ["."]
revision = "cb884138e64c9a8bf5c7d6106d74b0fca082df0c"
[[projects]]
name = "gopkg.in/ns1/ns1-go.v2"
packages = [
@ -1415,16 +1456,6 @@
]
revision = "c563826f4cbef9c11bebeb9f20a3f7afe9c1e2f4"
[[projects]]
name = "gopkg.in/square/go-jose.v1"
packages = [
".",
"cipher",
"json"
]
revision = "aa2e30fdd1fe9dd3394119af66451ae790d50e0d"
version = "v1.1.0"
[[projects]]
name = "gopkg.in/square/go-jose.v2"
packages = [
@ -1644,6 +1675,6 @@
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "593d67272ac35ca0fa59df7f2ac077a81ea842b3181b00acffa20553bfe6f2e0"
inputs-digest = "c441208e9bf330e85e2939b383515f58a4957286960b43c444e6f512d1ff94ee"
solver-name = "gps-cdcl"
solver-version = 1

View file

@ -66,7 +66,7 @@
[[constraint]]
name = "github.com/containous/traefik-extra-service-fabric"
version = "1.1.1"
version = "1.1.3"
[[constraint]]
name = "github.com/coreos/go-systemd"
@ -181,8 +181,9 @@
name = "github.com/vulcand/oxy"
[[constraint]]
branch = "acmev2"
branch = "containous-fork"
name = "github.com/xenolf/lego"
source = "github.com/containous/lego"
[[constraint]]
name = "google.golang.org/grpc"

View file

@ -74,7 +74,7 @@ _(But if you'd rather configure some of your routes manually, Træfik supports t
- [Kubernetes](https://docs.traefik.io/configuration/backends/kubernetes)
- [Mesos](https://docs.traefik.io/configuration/backends/mesos) / [Marathon](https://docs.traefik.io/configuration/backends/marathon)
- [Rancher](https://docs.traefik.io/configuration/backends/rancher) (API, Metadata)
- [Service Fabric](https://docs.traefik.io/configuration/backends/servicefabric)
- [Azure Service Fabric](https://docs.traefik.io/configuration/backends/servicefabric)
- [Consul Catalog](https://docs.traefik.io/configuration/backends/consulcatalog)
- [Consul](https://docs.traefik.io/configuration/backends/consul) / [Etcd](https://docs.traefik.io/configuration/backends/etcd) / [Zookeeper](https://docs.traefik.io/configuration/backends/zookeeper) / [BoltDB](https://docs.traefik.io/configuration/backends/boltdb)
- [Eureka](https://docs.traefik.io/configuration/backends/eureka)

View file

@ -26,7 +26,7 @@ import (
"github.com/containous/traefik/tls/generate"
"github.com/containous/traefik/types"
"github.com/eapache/channels"
"github.com/xenolf/lego/acmev2"
acme "github.com/xenolf/lego/acmev2"
"github.com/xenolf/lego/providers/dns"
)
@ -62,20 +62,6 @@ type ACME struct {
}
func (a *ACME) init() error {
// FIXME temporary fix, waiting for https://github.com/xenolf/lego/pull/478
acme.HTTPClient = http.Client{
Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
TLSHandshakeTimeout: 15 * time.Second,
ResponseHeaderTimeout: 15 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
},
}
if a.ACMELogging {
acme.Logger = fmtlog.New(os.Stderr, "legolog: ", fmtlog.LstdFlags)
} else {
@ -651,6 +637,7 @@ func (a *ACME) runJobs() {
// getValidDomains checks if a given domain is allowed to generate an ACME certificate and returns it
func (a *ACME) getValidDomains(domains []string, wildcardAllowed bool) ([]string, error) {
// Check if the domains array is empty or contains only one empty value
if len(domains) == 0 || (len(domains) == 1 && len(domains[0]) == 0) {
return nil, errors.New("unable to generate a certificate when no domain is given")
}
@ -663,15 +650,14 @@ func (a *ACME) getValidDomains(domains []string, wildcardAllowed bool) ([]string
if a.DNSChallenge == nil && len(a.DNSProvider) == 0 {
return nil, fmt.Errorf("unable to generate a wildcard certificate for domain %q : ACME needs a DNSChallenge", strings.Join(domains, ","))
}
if len(domains) > 1 {
return nil, fmt.Errorf("unable to generate a wildcard certificate for domain %q : SANs are not allowed", strings.Join(domains, ","))
if strings.HasPrefix(domains[0], "*.*") {
return nil, fmt.Errorf("unable to generate a wildcard certificate for domain %q : ACME does not allow '*.*' wildcard domain", strings.Join(domains, ","))
}
} else {
for _, san := range domains[1:] {
if strings.HasPrefix(san, "*") {
return nil, fmt.Errorf("unable to generate a certificate in ACME provider for domains %q: SANs can not be a wildcard domain", strings.Join(domains, ","))
}
}
for _, san := range domains[1:] {
if strings.HasPrefix(san, "*") {
return nil, fmt.Errorf("unable to generate a certificate for domains %q: SANs can not be a wildcard domain", strings.Join(domains, ","))
}
}
@ -710,31 +696,37 @@ func (a *ACME) deleteUnnecessaryDomains() {
keepDomain = false
}
break
} else if strings.HasPrefix(domain.Main, "*") && domain.SANs == nil {
// Check if domains can be validated by the wildcard domain
var newDomainsToCheck []string
// Check if domains can be validated by the wildcard domain
domainsMap := make(map[string]*tls.Certificate)
domainsMap[domain.Main] = &tls.Certificate{}
for _, domainProcessed := range domainToCheck.ToStrArray() {
if isDomainAlreadyChecked(domainProcessed, domainsMap) {
log.Warnf("Domain %q will not be processed by ACME because it is validated by the wildcard %q", domainProcessed, domain.Main)
continue
}
newDomainsToCheck = append(newDomainsToCheck, domainProcessed)
}
// Delete the domain if both Main and SANs can be validated by the wildcard domain
// otherwise keep the unchecked values
if newDomainsToCheck == nil {
keepDomain = false
break
}
domainToCheck.Set(newDomainsToCheck)
}
var newDomainsToCheck []string
// Check if domains can be validated by the wildcard domain
domainsMap := make(map[string]*tls.Certificate)
domainsMap[domain.Main] = &tls.Certificate{}
if len(domain.SANs) > 0 {
domainsMap[strings.Join(domain.SANs, ",")] = &tls.Certificate{}
}
for _, domainProcessed := range domainToCheck.ToStrArray() {
if idxDomain < idxDomainToCheck && isDomainAlreadyChecked(domainProcessed, domainsMap) {
// The domain is duplicated in a CN
log.Warnf("Domain %q is duplicated in the configuration or validated by the domain %v. It will be processed once.", domainProcessed, domain)
continue
} else if domain.Main != domainProcessed && strings.HasPrefix(domain.Main, "*") && types.MatchDomain(domainProcessed, domain.Main) {
// Check if a wildcard can validate the domain
log.Warnf("Domain %q will not be processed by ACME provider because it is validated by the wildcard %q", domainProcessed, domain.Main)
continue
}
newDomainsToCheck = append(newDomainsToCheck, domainProcessed)
}
// Delete the domain if both Main and SANs can be validated by the wildcard domain
// otherwise keep the unchecked values
if newDomainsToCheck == nil {
keepDomain = false
break
}
domainToCheck.Set(newDomainsToCheck)
}
if keepDomain {

View file

@ -14,7 +14,7 @@ import (
"github.com/containous/traefik/tls/generate"
"github.com/containous/traefik/types"
"github.com/stretchr/testify/assert"
"github.com/xenolf/lego/acmev2"
acme "github.com/xenolf/lego/acmev2"
)
func TestDomainsSet(t *testing.T) {
@ -417,11 +417,27 @@ func TestAcme_getValidDomain(t *testing.T) {
expectedDomains: nil,
},
{
desc: "unexpected SANs",
domains: []string{"*.traefik.wtf", "foo.traefik.wtf"},
desc: "unauthorized wildcard with SAN",
domains: []string{"*.*.traefik.wtf", "foo.traefik.wtf"},
dnsChallenge: &acmeprovider.DNSChallenge{},
wildcardAllowed: true,
expectedErr: "unable to generate a wildcard certificate for domain \"*.traefik.wtf,foo.traefik.wtf\" : SANs are not allowed",
expectedErr: "unable to generate a wildcard certificate for domain \"*.*.traefik.wtf,foo.traefik.wtf\" : ACME does not allow '*.*' wildcard domain",
expectedDomains: nil,
},
{
desc: "wildcard with SANs",
domains: []string{"*.traefik.wtf", "traefik.wtf"},
dnsChallenge: &acmeprovider.DNSChallenge{},
wildcardAllowed: true,
expectedErr: "",
expectedDomains: []string{"*.traefik.wtf", "traefik.wtf"},
},
{
desc: "unexpected SANs",
domains: []string{"*.traefik.wtf", "*.acme.wtf"},
dnsChallenge: &acmeprovider.DNSChallenge{},
wildcardAllowed: true,
expectedErr: "unable to generate a certificate for domains \"*.traefik.wtf,*.acme.wtf\": SANs can not be a wildcard domain",
expectedDomains: nil,
},
}

View file

@ -26,7 +26,7 @@ func NewLocalStore(file string) *LocalStore {
func (s *LocalStore) Get() (*Account, error) {
account := &Account{}
hasData, err := checkFile(s.file)
hasData, err := acme.CheckFile(s.file)
if err != nil {
return nil, err
}

View file

@ -231,7 +231,7 @@ var _templatesConsul_catalogTmpl = []byte(`[backends]
status = [{{range $page.Status }}
"{{.}}",
{{end}}]
backend = "{{ $page.Backend }}"
backend = "backend-{{ $page.Backend }}"
query = "{{ $page.Query }}"
{{end}}
{{end}}
@ -632,7 +632,7 @@ var _templatesDockerTmpl = []byte(`{{$backendServers := .Servers}}
status = [{{range $page.Status }}
"{{.}}",
{{end}}]
backend = "{{ $page.Backend }}"
backend = "backend-{{ $page.Backend }}"
query = "{{ $page.Query }}"
{{end}}
{{end}}
@ -884,7 +884,7 @@ var _templatesEcsTmpl = []byte(`[backends]
status = [{{range $page.Status }}
"{{.}}",
{{end}}]
backend = "{{ $page.Backend }}"
backend = "backend-{{ $page.Backend }}"
query = "{{ $page.Query }}"
{{end}}
{{end}}
@ -1588,7 +1588,7 @@ var _templatesMarathonTmpl = []byte(`{{ $apps := .Applications }}
status = [{{range $page.Status }}
"{{.}}",
{{end}}]
backend = "{{ $page.Backend }}"
backend = "backend{{ $page.Backend }}"
query = "{{ $page.Query }}"
{{end}}
{{end}}
@ -1826,7 +1826,7 @@ var _templatesMesosTmpl = []byte(`[backends]
status = [{{range $page.Status }}
"{{.}}",
{{end}}]
backend = "{{ $page.Backend }}"
backend = "backend-{{ $page.Backend }}"
query = "{{ $page.Query }}"
{{end}}
{{end}}
@ -2117,7 +2117,7 @@ var _templatesRancherTmpl = []byte(`{{ $backendServers := .Backends }}
status = [{{range $page.Status }}
"{{.}}",
{{end}}]
backend = "{{ $page.Backend }}"
backend = "backend-{{ $page.Backend }}"
query = "{{ $page.Query }}"
{{end}}
{{end}}

View file

@ -35,6 +35,7 @@ import (
"github.com/coreos/go-systemd/daemon"
"github.com/ogier/pflag"
"github.com/sirupsen/logrus"
"github.com/vulcand/oxy/roundrobin"
)
func main() {
@ -155,6 +156,10 @@ func runCmd(globalConfiguration *configuration.GlobalConfiguration, configFile s
http.DefaultTransport.(*http.Transport).Proxy = http.ProxyFromEnvironment
if globalConfiguration.AllowMinWeightZero {
roundrobin.SetDefaultWeight(0)
}
globalConfiguration.SetEffectiveConfiguration(configFile)
globalConfiguration.ValidateConfiguration()

View file

@ -78,6 +78,7 @@ type GlobalConfiguration struct {
HealthCheck *HealthCheckConfig `description:"Health check parameters" export:"true"`
RespondingTimeouts *RespondingTimeouts `description:"Timeouts for incoming requests to the Traefik instance" export:"true"`
ForwardingTimeouts *ForwardingTimeouts `description:"Timeouts for requests forwarded to the backend servers" export:"true"`
AllowMinWeightZero bool `description:"Allow weight to take 0 as minimum real value." export:"true"` // Deprecated
Web *WebCompatibility `description:"(Deprecated) Enable Web backend with default settings" export:"true"` // Deprecated
Docker *docker.Provider `description:"Enable Docker backend with default settings" export:"true"`
File *file.Provider `description:"Enable File backend with default settings" export:"true"`

View file

@ -66,7 +66,7 @@ ${USAGE}" >&2
bad_acme() {
echo "
There was a problem parsing your acme.json file. $1
There was a problem parsing your acme.json file.
${USAGE}" >&2
exit 2

View file

@ -170,7 +170,7 @@ Here is an example of frontends definition:
- Three frontends are defined: `frontend1`, `frontend2` and `frontend3`
- `frontend1` will forward the traffic to the `backend2` if the rule `Host:test.localhost,test2.localhost` is matched
- `frontend2` will forward the traffic to the `backend1` if the rule `Host:localhost,{subdomain:[a-z]+}.localhost` is matched (forwarding client `Host` header to the backend)
- `frontend2` will forward the traffic to the `backend1` if the rule `HostRegexp:localhost,{subdomain:[a-z]+}.localhost` is matched (forwarding client `Host` header to the backend)
- `frontend3` will forward the traffic to the `backend2` if the rules `Host:test3.localhost` **AND** `Path:/test` are matched
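For reference, a minimal sketch of the `Host` and `HostRegexp` rules described above (the exact layout of the full example is not shown in this hunk, so this is only an illustration):

```toml
[frontends]
  [frontends.frontend1]
    backend = "backend2"
    [frontends.frontend1.routes.route1]
      rule = "Host:test.localhost,test2.localhost"
  [frontends.frontend2]
    backend = "backend1"
    # passHostHeader forwards the client Host header to the backend.
    passHostHeader = true
    [frontends.frontend2.routes.route1]
      rule = "HostRegexp:localhost,{subdomain:[a-z]+}.localhost"
```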
#### Combining multiple rules
@ -645,18 +645,18 @@ Once a day (the first call begins 10 minutes after the start of Træfik), we col
swarmMode = true
[Docker.TLS]
CA = "dockerCA"
Cert = "dockerCert"
Key = "dockerKey"
InsecureSkipVerify = true
ca = "dockerCA"
cert = "dockerCert"
key = "dockerKey"
insecureSkipVerify = true
[ECS]
Domain = "foo.bar"
ExposedByDefault = true
Clusters = ["foo-bar"]
Region = "us-west-2"
AccessKeyID = "AccessKeyID"
SecretAccessKey = "SecretAccessKey"
domain = "foo.bar"
exposedByDefault = true
clusters = ["foo-bar"]
region = "us-west-2"
accessKeyID = "AccessKeyID"
secretAccessKey = "SecretAccessKey"
```
- Obfuscated and anonymous configuration:
@ -669,24 +669,24 @@ Once a day (the first call begins 10 minutes after the start of Træfik), we col
[api]
[Docker]
Endpoint = "xxxx"
Domain = "xxxx"
ExposedByDefault = true
SwarmMode = true
endpoint = "xxxx"
domain = "xxxx"
exposedByDefault = true
swarmMode = true
[Docker.TLS]
CA = "xxxx"
Cert = "xxxx"
Key = "xxxx"
InsecureSkipVerify = false
ca = "xxxx"
cert = "xxxx"
key = "xxxx"
insecureSkipVerify = false
[ECS]
Domain = "xxxx"
ExposedByDefault = true
Clusters = []
Region = "us-west-2"
AccessKeyID = "xxxx"
SecretAccessKey = "xxxx"
domain = "xxxx"
exposedByDefault = true
clusters = []
region = "us-west-2"
accessKeyID = "xxxx"
secretAccessKey = "xxxx"
```
### Show me the code !

View file

@ -118,7 +118,7 @@ server {
Here is the `traefik.toml` file used:
```toml
MaxIdleConnsPerHost = 100000
maxIdleConnsPerHost = 100000
defaultEntryPoints = ["http"]
[entryPoints]

View file

@ -112,7 +112,7 @@ entryPoint = "https"
#
entryPoint = "http"
# Use a DNS-01/DNS-02 acme challenge rather than HTTP-01 challenge.
# Use a DNS-01 acme challenge rather than HTTP-01 challenge.
# Note : Mandatory for wildcard certificates generation.
#
# Optional
@ -264,7 +264,7 @@ defaultEntryPoints = ["http", "https"]
### `dnsChallenge`
Use `DNS-01/DNS-02` challenge to generate/renew ACME certificates.
Use a `DNS-01` challenge to generate/renew ACME certificates.
```toml
[acme]
@ -276,7 +276,7 @@ Use `DNS-01/DNS-02` challenge to generate/renew ACME certificates.
```
!!! note
ACME wildcard certificates can only be generated thanks to a `DNS-02` challenge.
ACME wildcard certificates can only be generated thanks to a `DNS-01` challenge.
#### `provider`
@ -286,21 +286,28 @@ Select the provider that matches the DNS domain that will host the challenge TXT
|--------------------------------------------------------|----------------|---------------------------------------------------------------------------------------------------------------------------|
| [Auroradns](https://www.pcextreme.com/aurora/dns) | `auroradns` | `AURORA_USER_ID`, `AURORA_KEY`, `AURORA_ENDPOINT` |
| [Azure](https://azure.microsoft.com/services/dns/) | `azure` | `AZURE_CLIENT_ID`, `AZURE_CLIENT_SECRET`, `AZURE_SUBSCRIPTION_ID`, `AZURE_TENANT_ID`, `AZURE_RESOURCE_GROUP` |
| [Blue Cat](https://www.bluecatnetworks.com/) | `bluecat` | `BLUECAT_SERVER_URL`, `BLUECAT_USER_NAME`, `BLUECAT_PASSWORD`, `BLUECAT_CONFIG_NAME`, `BLUECAT_DNS_VIEW` |
| [Cloudflare](https://www.cloudflare.com) | `cloudflare` | `CLOUDFLARE_EMAIL`, `CLOUDFLARE_API_KEY` - The Cloudflare `Global API Key` needs to be used and not the `Origin CA Key` |
| [CloudXNS](https://www.cloudxns.net) | `cloudxns` | `CLOUDXNS_API_KEY`, `CLOUDXNS_SECRET_KEY` |
| [DigitalOcean](https://www.digitalocean.com) | `digitalocean` | `DO_AUTH_TOKEN` |
| [DNSimple](https://dnsimple.com) | `dnsimple` | `DNSIMPLE_OAUTH_TOKEN`, `DNSIMPLE_BASE_URL` |
| [DNS Made Easy](https://dnsmadeeasy.com) | `dnsmadeeasy` | `DNSMADEEASY_API_KEY`, `DNSMADEEASY_API_SECRET`, `DNSMADEEASY_SANDBOX` |
| [DNSPod](http://www.dnspod.net/) | `dnspod` | `DNSPOD_API_KEY` |
| [Duck DNS](https://www.duckdns.org/) | `duckdns` | `DUCKDNS_TOKEN` |
| [Dyn](https://dyn.com) | `dyn` | `DYN_CUSTOMER_NAME`, `DYN_USER_NAME`, `DYN_PASSWORD` |
| External Program | `exec` | `EXEC_PATH` |
| [Exoscale](https://www.exoscale.ch) | `exoscale` | `EXOSCALE_API_KEY`, `EXOSCALE_API_SECRET`, `EXOSCALE_ENDPOINT` |
| [Fast DNS](https://www.akamai.com/) | `fastdns` | `AKAMAI_CLIENT_TOKEN`, `AKAMAI_CLIENT_SECRET`, `AKAMAI_ACCESS_TOKEN` |
| [Gandi](https://www.gandi.net) | `gandi` | `GANDI_API_KEY` |
| [Gandi V5](http://doc.livedns.gandi.net) | `gandiv5` | `GANDIV5_API_KEY` |
| [Glesys](https://glesys.com/) | `glesys` | `GLESYS_API_USER`, `GLESYS_API_KEY`, `GLESYS_DOMAIN` |
| [GoDaddy](https://godaddy.com/domains) | `godaddy` | `GODADDY_API_KEY`, `GODADDY_API_SECRET` |
| [Google Cloud DNS](https://cloud.google.com/dns/docs/) | `gcloud` | `GCE_PROJECT`, `GCE_SERVICE_ACCOUNT_FILE` |
| [Lightsail](https://aws.amazon.com/lightsail/) | `lightsail` | `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, `DNS_ZONE` |
| [Linode](https://www.linode.com) | `linode` | `LINODE_API_KEY` |
| manual | - | none, but run Træfik interactively & turn on `acmeLogging` to see instructions & press <kbd>Enter</kbd>. |
| [Namecheap](https://www.namecheap.com) | `namecheap` | `NAMECHEAP_API_USER`, `NAMECHEAP_API_KEY` |
| [name.com](https://www.name.com/) | `namedotcom` | `NAMECOM_USERNAME`, `NAMECOM_API_TOKEN`, `NAMECOM_SERVER` |
| [Ns1](https://ns1.com/) | `ns1` | `NS1_API_KEY` |
| [Open Telekom Cloud](https://cloud.telekom.de/en/) | `otc` | `OTC_DOMAIN_NAME`, `OTC_USER_NAME`, `OTC_PASSWORD`, `OTC_PROJECT_NAME`, `OTC_IDENTITY_ENDPOINT` |
| [OVH](https://www.ovh.com) | `ovh` | `OVH_ENDPOINT`, `OVH_APPLICATION_KEY`, `OVH_APPLICATION_SECRET`, `OVH_CONSUMER_KEY` |
@ -390,14 +397,18 @@ CA server to use.
main = "local3.com"
[[acme.domains]]
main = "*.local4.com"
sans = ["local4.com", "test1.test1.local4.com"]
# ...
```
#### Wildcard domains
Wildcard domain has to be defined as a main domain **with no SANs** (alternative domains).
Wildcard domain has to be defined as a main domain.
All domains must have A/AAAA records pointing to Træfik.
Due to an ACME limitation, it's not possible to define a wildcard as a SAN (alternative domain).
Nor is it possible to define a wildcard of a wildcard domain (for example `*.*.local.com`).
!!! warning
Note that Let's Encrypt has [rate limiting](https://letsencrypt.org/docs/rate-limits).
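A minimal sketch of these constraints, with placeholder domains (the wildcard is the main domain; the root domain may still be listed as a SAN):

```toml
[acme]
  # ...
  [[acme.domains]]
    main = "*.example.com"       # valid: wildcard as the main domain
    sans = ["example.com"]       # valid: non-wildcard SANs are allowed
  # Invalid examples (rejected):
  #   main = "*.*.example.com"         # wildcard of a wildcard domain
  #   sans = ["*.sub.example.com"]     # wildcard as a SAN
```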
@ -428,9 +439,9 @@ Each domain & SANs will lead to a certificate request.
[ACME V2](https://community.letsencrypt.org/t/acme-v2-and-wildcard-certificate-support-is-live/55579) allows wildcard certificate support.
However, this feature needs a specific configuration.
### DNS-02 Challenge
### DNS-01 Challenge
As described in [Let's Encrypt post](https://community.letsencrypt.org/t/staging-endpoint-for-acme-v2/49605), wildcard certificates can only be generated through a `DNS-02`Challenge.
As described in [Let's Encrypt post](https://community.letsencrypt.org/t/staging-endpoint-for-acme-v2/49605), wildcard certificates can only be generated through a `DNS-01` Challenge.
This challenge is linked to the Træfik option `acme.dnsChallenge`.
```toml
@ -447,16 +458,88 @@ For more information about this option, please refer to the [dnsChallenge sectio
### Wildcard domain
Wildcard domains can currently be provided only to the `acme.domains` option.
These domains can not have SANs.
```toml
[acme]
# ...
[[acme.domains]]
main = "*local1.com"
main = "*.local1.com"
sans = ["local1.com"]
[[acme.domains]]
main = "*.local2.com"
# ...
```
For more information about this option, please refer to the [domains section](/configuration/acme/#domains).
### Limitations
Let's Encrypt wildcard support has some limitations to take into account:
- A wildcard domain can not be a SAN (alternative domain),
- A wildcard of a wildcard domain is forbidden (for example `*.*.local.com`),
- A DNS-01 challenge is executed for each domain (CN and SANs); some DNS providers can not manage this behavior correctly, as explained in the [DNS provider support section](/configuration/acme/#dns-provider-support).
### DNS provider support
All DNS providers allow creating ACME wildcard certificates.
However, several issues can appear for wildcard domains with SANs.
If a wildcard domain is defined with its root domain as a SAN, as described below, two DNS-01 challenges will be executed.
```toml
[acme]
# ...
[[acme.domains]]
main = "*.local1.com"
sans = ["local1.com"]
# ...
```
When a DNS-01 challenge is performed, Let's Encrypt checks that a TXT record has been created with a given name and a given value.
When a certificate is generated for a wildcard domain defined with its root domain as a SAN, the requested TXT record name is the same for both the wildcard domain and the root domain.
The [DNS RFC](https://community.letsencrypt.org/t/wildcard-issuance-two-txt-records-for-the-same-name/54528/2) allows this behavior.
But all DNS providers keep TXT record values in a cache with a TTL.
Depending on the parameters given by the Træfik ACME client library ([LEGO](https://github.com/xenolf/lego)), the TXT record TTL can exceed the challenge timeout, in which case the DNS-01 challenge will not work correctly.
[LEGO](https://github.com/xenolf/lego) will evolve to adapt to all DNS providers.
Meanwhile, the table below lists the DNS providers supported by Træfik and indicates whether they allow generating certificates for a wildcard domain and its root domain.
Do not hesitate to complete it.
| Provider Name | Provider code | Wildcard and Root Domain Support |
|--------------------------------------------------------|----------------|----------------------------------|
| [Auroradns](https://www.pcextreme.com/aurora/dns) | `auroradns` | Not tested yet |
| [Azure](https://azure.microsoft.com/services/dns/) | `azure` | Not tested yet |
| [Blue Cat](https://www.bluecatnetworks.com/) | `bluecat` | Not tested yet |
| [Cloudflare](https://www.cloudflare.com) | `cloudflare` | YES |
| [CloudXNS](https://www.cloudxns.net) | `cloudxns` | Not tested yet |
| [DigitalOcean](https://www.digitalocean.com) | `digitalocean` | YES |
| [DNSimple](https://dnsimple.com) | `dnsimple` | Not tested yet |
| [DNS Made Easy](https://dnsmadeeasy.com) | `dnsmadeeasy` | Not tested yet |
| [DNSPod](http://www.dnspod.net/) | `dnspod` | Not tested yet |
| [Duck DNS](https://www.duckdns.org/) | `duckdns` | Not tested yet |
| [Dyn](https://dyn.com) | `dyn` | Not tested yet |
| External Program | `exec` | Not tested yet |
| [Exoscale](https://www.exoscale.ch) | `exoscale` | Not tested yet |
| [Fast DNS](https://www.akamai.com/) | `fastdns` | Not tested yet |
| [Gandi](https://www.gandi.net) | `gandi` | Not tested yet |
| [Gandi V5](http://doc.livedns.gandi.net) | `gandiv5` | Not tested yet |
| [Glesys](https://glesys.com/) | `glesys` | Not tested yet |
| [GoDaddy](https://godaddy.com/domains) | `godaddy` | Not tested yet |
| [Google Cloud DNS](https://cloud.google.com/dns/docs/) | `gcloud` | YES |
| [Lightsail](https://aws.amazon.com/lightsail/) | `lightsail` | Not tested yet |
| [Linode](https://www.linode.com) | `linode` | Not tested yet |
| manual | - | YES |
| [Namecheap](https://www.namecheap.com) | `namecheap` | Not tested yet |
| [name.com](https://www.name.com/) | `namedotcom` | Not tested yet |
| [Ns1](https://ns1.com/) | `ns1` | Not tested yet |
| [Open Telekom Cloud](https://cloud.telekom.de/en/) | `otc` | Not tested yet |
| [OVH](https://www.ovh.com) | `ovh` | YES |
| [PowerDNS](https://www.powerdns.com) | `pdns` | Not tested yet |
| [Rackspace](https://www.rackspace.com/cloud/dns) | `rackspace` | Not tested yet |
| [RFC2136](https://tools.ietf.org/html/rfc2136) | `rfc2136` | Not tested yet |
| [Route 53](https://aws.amazon.com/route53/) | `route53` | YES |
| [VULTR](https://www.vultr.com) | `vultr` | Not tested yet |

View file

@ -53,7 +53,7 @@ filename = "boltdb.tmpl"
# ca = "/etc/ssl/ca.crt"
# cert = "/etc/ssl/boltdb.crt"
# key = "/etc/ssl/boltdb.key"
# insecureskipverify = true
# insecureSkipVerify = true
```
To enable constraints see [backend-specific constraints section](/configuration/commons/#backend-specific).

View file

@ -53,7 +53,7 @@ prefix = "traefik"
# ca = "/etc/ssl/ca.crt"
# cert = "/etc/ssl/consul.crt"
# key = "/etc/ssl/consul.key"
# insecureskipverify = true
# insecureSkipVerify = true
```
To enable constraints see [backend-specific constraints section](/configuration/commons/#backend-specific).

View file

@ -57,7 +57,7 @@ prefix = "traefik"
# ca = "/etc/ssl/ca.crt"
# cert = "/etc/ssl/consul.crt"
# key = "/etc/ssl/consul.key"
# insecureskipverify = true
# insecureSkipVerify = true
# Override default configuration template.
# For advanced users :)

View file

@ -54,7 +54,7 @@ watch = true
# Optional
# Default: true
#
exposedbydefault = true
exposedByDefault = true
# Use the IP address from the bound port instead of the inner network one.
# For specific use-case :)
@ -69,7 +69,7 @@ usebindportip = true
# Optional
# Default: false
#
swarmmode = false
swarmMode = false
# Enable docker TLS connection.
#
@ -79,7 +79,7 @@ swarmmode = false
# ca = "/etc/ssl/ca.crt"
# cert = "/etc/ssl/docker.crt"
# key = "/etc/ssl/docker.key"
# insecureskipverify = true
# insecureSkipVerify = true
```
To enable constraints see [backend-specific constraints section](/configuration/commons/#backend-specific).
@ -89,7 +89,7 @@ To enable constraints see [backend-specific constraints section](/configuration/
```toml
################################################################
# Docker Swarmmode configuration backend
# Docker Swarm Mode configuration backend
################################################################
# Enable Docker configuration backend.
@ -123,7 +123,7 @@ watch = true
# Optional
# Default: false
#
swarmmode = true
swarmMode = true
# Override default configuration template.
# For advanced users :)
@ -146,7 +146,7 @@ swarmmode = true
# Optional
# Default: true
#
exposedbydefault = false
exposedByDefault = false
# Enable docker TLS connection.
#
@ -156,7 +156,7 @@ exposedbydefault = false
# ca = "/etc/ssl/ca.crt"
# cert = "/etc/ssl/docker.crt"
# key = "/etc/ssl/docker.key"
# insecureskipverify = true
# insecureSkipVerify = true
```
To enable constraints see [backend-specific constraints section](/configuration/commons/#backend-specific).

View file

@ -39,13 +39,13 @@ watch = true
#
refreshSeconds = 15
# AccessKeyID to use when connecting to AWS.
# Access Key ID to use when connecting to AWS.
#
# Optional
#
accessKeyID = "abc"
# SecretAccessKey to use when connecting to AWS.
# Secret Access Key to use when connecting to AWS.
#
# Optional
#

View file

@ -66,13 +66,13 @@ exposedByDefault = false
#
region = "us-east-1"
# AccessKeyID to use when connecting to AWS.
# Access Key ID to use when connecting to AWS.
#
# Optional
#
accessKeyID = "abc"
# SecretAccessKey to use when connecting to AWS.
# Secret Access Key to use when connecting to AWS.
#
# Optional
#
@ -95,7 +95,7 @@ secretAccessKey = "123"
# templateVersion = "2"
```
If `AccessKeyID`/`SecretAccessKey` is not given credentials will be resolved in the following order:
If `accessKeyID`/`secretAccessKey` is not given, credentials will be resolved in the following order:
- From environment variables; `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, and `AWS_SESSION_TOKEN`.
- Shared credentials, determined by `AWS_PROFILE` and `AWS_SHARED_CREDENTIALS_FILE`, defaults to `default` and `~/.aws/credentials`.

View file

@ -63,7 +63,7 @@ useAPIV3 = true
# ca = "/etc/ssl/ca.crt"
# cert = "/etc/ssl/etcd.crt"
# key = "/etc/ssl/etcd.key"
# insecureskipverify = true
# insecureSkipVerify = true
```
To enable constraints see [backend-specific constraints section](/configuration/commons/#backend-specific).

View file

@ -114,7 +114,7 @@ If the service port defined in the ingress spec is 443, then the backend communi
!!! note
Please note that by enabling TLS communication between traefik and your pods, you will need trusted certificates that have the proper trust chain and IP subject name.
If this is not an option, you may need to skip TLS certificate verification.
See the [InsecureSkipVerify](/configuration/commons/#main-section) setting for more details.
See the [insecureSkipVerify](/configuration/commons/#main-section) setting for more details.
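For reference, a minimal sketch of that setting in the global section of `traefik.toml` (assuming skipping verification is acceptable on your backend network):

```toml
# Accept backend certificates that cannot be verified.
# This disables detection of man-in-the-middle attacks.
insecureSkipVerify = true
```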
## Annotations

View file

@ -103,7 +103,7 @@ domain = "marathon.localhost"
# CA = "/etc/ssl/ca.crt"
# Cert = "/etc/ssl/marathon.cert"
# Key = "/etc/ssl/marathon.key"
# InsecureSkipVerify = true
# insecureSkipVerify = true
# DCOSToken for DCOS environment.
# This will override the Authorization header.

View file

@ -62,34 +62,34 @@ domain = "mesos.localhost"
# Optional
#
# [mesos.TLS]
# InsecureSkipVerify = true
# insecureSkipVerify = true
# Zookeeper timeout (in seconds).
#
# Optional
# Default: 30
#
# ZkDetectionTimeout = 30
# zkDetectionTimeout = 30
# Polling interval (in seconds).
#
# Optional
# Default: 30
#
# RefreshSeconds = 30
# refreshSeconds = 30
# IP sources (e.g. host, docker, mesos, netinfo).
#
# Optional
#
# IPSources = "host"
# ipSources = "host"
# HTTP Timeout (in seconds).
#
# Optional
# Default: 30
#
# StateTimeoutSecond = "30"
# stateTimeoutSecond = "30"
# Convert groups to subdomains.
# Default behavior: /foo/bar/myapp => foo-bar-myapp.{defaultDomain}

View file

@ -77,7 +77,7 @@ To enable constraints see [backend-specific constraints section](/configuration/
#
[rancher.metadata]
# Poll the Rancher metadata service for changes every `rancher.RefreshSeconds`.
# Poll the Rancher metadata service for changes every `rancher.refreshSeconds`.
# NOTE: this is less accurate than the default long polling technique which
# will provide near instantaneous updates to Traefik
#

View file

@ -1,33 +1,33 @@
# Service Fabric Backend
# Azure Service Fabric Backend
Træfik can be configured to use Service Fabric as a backend configuration.
Træfik can be configured to use Azure Service Fabric as a backend configuration.
See [this repository for an example deployment package and further documentation.](https://aka.ms/traefikonsf)
## Service Fabric
## Azure Service Fabric
```toml
################################################################
# Service Fabric provider
# Azure Service Fabric provider
################################################################
# Enable Service Fabric configuration backend
# Enable Azure Service Fabric configuration backend
[serviceFabric]
# Service Fabric Management Endpoint
# Azure Service Fabric Management Endpoint
#
# Required
#
clusterManagementUrl = "https://localhost:19080"
# Service Fabric Management Endpoint API Version
# Azure Service Fabric Management Endpoint API Version
#
# Required
# Default: "3.0"
#
apiVersion = "3.0"
# Service Fabric Polling Interval (in seconds)
# Azure Service Fabric Polling Interval (in seconds)
#
# Required
# Default: 10
@ -42,7 +42,7 @@ refreshSeconds = 10
# ca = "/etc/ssl/ca.crt"
# cert = "/etc/ssl/servicefabric.crt"
# key = "/etc/ssl/servicefabric.key"
# insecureskipverify = true
# insecureSkipVerify = true
```
## Labels

View file

@ -53,7 +53,7 @@ prefix = "traefik"
# ca = "/etc/ssl/ca.crt"
# cert = "/etc/ssl/zookeeper.crt"
# key = "/etc/ssl/zookeeper.key"
# insecureskipverify = true
# insecureSkipVerify = true
```
To enable constraints see [backend-specific constraints section](/configuration/commons/#backend-specific).

View file

@ -38,14 +38,14 @@
# Optional
# Default: "2s"
#
# ProvidersThrottleDuration = "2s"
# providersThrottleDuration = "2s"
# Controls the maximum idle (keep-alive) connections to keep per-host.
#
# Optional
# Default: 200
#
# MaxIdleConnsPerHost = 200
# maxIdleConnsPerHost = 200
# If set to true invalid SSL certificates are accepted for backends.
# This disables detection of man-in-the-middle attacks so should only be used on secure backend networks.
@ -53,14 +53,14 @@
# Optional
# Default: false
#
# InsecureSkipVerify = true
# insecureSkipVerify = true
# Register Certificates in the RootCA.
# Register Certificates in the rootCA.
#
# Optional
# Default: []
#
# RootCAs = [ "/mycert.cert" ]
# rootCAs = [ "/mycert.cert" ]
# Entrypoints to be used by frontends that do not specify any entrypoint.
# Each frontend can specify its own entrypoints.
@ -69,6 +69,15 @@
# Default: ["http"]
#
# defaultEntryPoints = ["http", "https"]
# Allow the use of 0 as server weight.
# - false: a weight 0 means internally a weight of 1.
# - true: a weight 0 means internally a weight of 0 (a server with a weight of 0 is removed from the available servers).
#
# Optional
# Default: false
#
# AllowMinWeightZero = true
```
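As an illustration of the weight-zero option above, a minimal sketch with a hypothetical file-provider backend:

```toml
# Allow servers with a weight of 0 to be removed from the pool.
AllowMinWeightZero = true

[backends]
  [backends.backend1]
    [backends.backend1.servers.server1]
      url = "http://127.0.0.1:8081"
      weight = 1
    [backends.backend1.servers.server2]
      url = "http://127.0.0.1:8082"
      # With AllowMinWeightZero = true, a weight of 0 removes this server;
      # with the default (false), a weight of 0 is treated as 1.
      weight = 0
```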
- `graceTimeOut`: Duration to give active requests a chance to finish before Traefik stops.
@ -76,19 +85,19 @@ Can be provided in a format supported by [time.ParseDuration](https://golang.org
If no units are provided, the value is parsed assuming seconds.
**Note:** in this time frame no new requests are accepted.
- `ProvidersThrottleDuration`: Backends throttle duration: minimum duration in seconds between 2 events from providers before applying a new configuration.
- `providersThrottleDuration`: Backends throttle duration: minimum duration in seconds between 2 events from providers before applying a new configuration.
It avoids unnecessary reloads if multiple events are sent in a short amount of time.
Can be provided in a format supported by [time.ParseDuration](https://golang.org/pkg/time/#ParseDuration) or as raw values (digits).
If no units are provided, the value is parsed assuming seconds.
- `MaxIdleConnsPerHost`: Controls the maximum idle (keep-alive) connections to keep per-host.
- `maxIdleConnsPerHost`: Controls the maximum idle (keep-alive) connections to keep per-host.
If zero, `DefaultMaxIdleConnsPerHost` from the Go standard library net/http module is used.
If you encounter 'too many open files' errors, you can either increase this value or change the `ulimit`.
- `InsecureSkipVerify` : If set to true invalid SSL certificates are accepted for backends.
- `insecureSkipVerify` : If set to true invalid SSL certificates are accepted for backends.
**Note:** This disables detection of man-in-the-middle attacks so should only be used on secure backend networks.
- `RootCAs`: Register Certificates in the RootCA. This certificates will be use for backends calls.
- `rootCAs`: Register certificates in the RootCA. These certificates will be used for backend calls.
**Note** You can use a file path or the cert content directly (see the sketch after this list).
- `defaultEntryPoints`: Entrypoints to be used by frontends that do not specify any entrypoint.
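A minimal sketch of both `rootCAs` forms mentioned in the note above (the path and PEM content are placeholders):

```toml
# Reference the CA certificate by file path...
rootCAs = [ "/etc/ssl/backend-ca.cert" ]

# ...or inline the PEM content directly:
# rootCAs = [ """-----BEGIN CERTIFICATE-----
# ...placeholder PEM content...
# -----END CERTIFICATE-----""" ]
```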
@ -386,24 +395,24 @@ If no units are provided, the value is parsed assuming seconds.
### Idle Timeout (deprecated)
Use [respondingTimeouts](/configuration/commons/#responding-timeouts) instead of `IdleTimeout`.
Use [respondingTimeouts](/configuration/commons/#responding-timeouts) instead of `idleTimeout`.
In the case both settings are configured, the deprecated option will be overwritten.
`IdleTimeout` is the maximum amount of time an idle (keep-alive) connection will remain idle before closing itself.
`idleTimeout` is the maximum amount of time an idle (keep-alive) connection will remain idle before closing itself.
This is set to enforce closing of stale client connections.
Can be provided in a format supported by [time.ParseDuration](https://golang.org/pkg/time/#ParseDuration) or as raw values (digits).
If no units are provided, the value is parsed assuming seconds.
```toml
# IdleTimeout
# idleTimeout
#
# DEPRECATED - see [respondingTimeouts] section.
#
# Optional
# Default: "180s"
#
IdleTimeout = "360s"
idleTimeout = "360s"
```

View file

@ -15,28 +15,28 @@ Træfik supports two backends: Jaeger and Zipkin.
#
# Default: "jaeger"
#
Backend = "jaeger"
backend = "jaeger"
# Service name used in Jaeger backend
#
# Default: "traefik"
#
ServiceName = "traefik"
serviceName = "traefik"
[tracing.jaeger]
# SamplingServerURL is the address of jaeger-agent's HTTP sampling server
# Sampling Server URL is the address of jaeger-agent's HTTP sampling server
#
# Default: "http://localhost:5778/sampling"
#
SamplingServerURL = "http://localhost:5778/sampling"
samplingServerURL = "http://localhost:5778/sampling"
# Sampling Type specifies the type of the sampler: const, probabilistic, rateLimiting
#
# Default: "const"
#
SamplingType = "const"
samplingType = "const"
# SamplingParam Param is a value passed to the sampler.
# Sampling Param is a value passed to the sampler.
# Valid values for Param field are:
# - for "const" sampler, 0 or 1 for always false/true respectively
# - for "probabilistic" sampler, a probability between 0 and 1
@ -44,13 +44,13 @@ Træfik supports two backends: Jaeger and Zipkin.
#
# Default: 1.0
#
SamplingParam = 1.0
samplingParam = 1.0
# LocalAgentHostPort instructs reporter to send spans to jaeger-agent at this address
# Local Agent Host Port instructs reporter to send spans to jaeger-agent at this address
#
# Default: "127.0.0.1:6832"
#
LocalAgentHostPort = "127.0.0.1:6832"
localAgentHostPort = "127.0.0.1:6832"
```
## Zipkin
@ -62,36 +62,36 @@ Træfik supports two backends: Jaeger and Zipkin.
#
# Default: "jaeger"
#
Backend = "zipkin"
backend = "zipkin"
# Service name used in Zipkin backend
#
# Default: "traefik"
#
ServiceName = "traefik"
serviceName = "traefik"
[tracing.zipkin]
# Zipkin HTTP endpoint used to send data
#
# Default: "http://localhost:9411/api/v1/spans"
#
HTTPEndpoint = "http://localhost:9411/api/v1/spans"
httpEndpoint = "http://localhost:9411/api/v1/spans"
# Enable Zipkin debug
#
# Default: false
#
Debug = false
debug = false
# Use ZipKin SameSpan RPC style traces
#
# Default: false
#
SameSpan = false
sameSpan = false
# Use ZipKin 128 bit root span IDs
#
# Default: true
#
ID128Bit = true
id128Bit = true
```

View file

@ -52,7 +52,7 @@ _(But if you'd rather configure some of your routes manually, Træfik supports t
- [Kubernetes](/configuration/backends/kubernetes/)
- [Mesos](/configuration/backends/mesos/) / [Marathon](/configuration/backends/marathon/)
- [Rancher](/configuration/backends/rancher/) (API, Metadata)
- [Service Fabric](/configuration/backends/servicefabric/)
- [Azure Service Fabric](/configuration/backends/servicefabric/)
- [Consul Catalog](/configuration/backends/consulcatalog/)
- [Consul](/configuration/backends/consul/) / [Etcd](/configuration/backends/etcd/) / [Zookeeper](/configuration/backends/zookeeper/) / [BoltDB](/configuration/backends/boltdb/)
- [Eureka](/configuration/backends/eureka/)

View file

@ -77,12 +77,12 @@ TL;DR:
```shell
$ traefik \
--docker \
--docker.swarmmode \
--docker.swarmMode \
--docker.domain=mydomain.ca \
--docker.watch
```
To enable docker and swarm-mode support, you need to add `--docker` and `--docker.swarmmode` flags.
To enable docker and swarm-mode support, you need to add `--docker` and `--docker.swarmMode` flags.
To watch docker events, add `--docker.watch`.
### Full docker-compose file
@ -101,11 +101,11 @@ services:
- "--acme.storage=/etc/traefik/acme/acme.json"
- "--acme.entryPoint=https"
- "--acme.httpChallenge.entryPoint=http"
- "--acme.OnHostRule=true"
- "--acme.onHostRule=true"
- "--acme.onDemand=false"
- "--acme.email=contact@mydomain.ca"
- "--docker"
- "--docker.swarmmode"
- "--docker.swarmMode"
- "--docker.domain=mydomain.ca"
- "--docker.watch"
volumes:
@ -211,11 +211,11 @@ services:
- "--acme.storage=traefik/acme/account"
- "--acme.entryPoint=https"
- "--acme.httpChallenge.entryPoint=http"
- "--acme.OnHostRule=true"
- "--acme.onHostRule=true"
- "--acme.onDemand=false"
- "--acme.email=foobar@example.com"
- "--docker"
- "--docker.swarmmode"
- "--docker.swarmMode"
- "--docker.domain=example.com"
- "--docker.watch"
- "--consul"

View file

@ -97,13 +97,13 @@ defaultEntryPoints = ["https","http"]
endpoint = "unix:///var/run/docker.sock"
domain = "my-awesome-app.org"
watch = true
exposedbydefault = false
exposedByDefault = false
[acme]
email = "your-email-here@my-awesome-app.org"
storage = "acme.json"
entryPoint = "https"
OnHostRule = true
onHostRule = true
[acme.httpChallenge]
entryPoint = "http"
```
@ -250,7 +250,7 @@ Træfik will create a frontend to listen to incoming HTTP requests which contain
- Always specify the correct port where the container expects HTTP traffic using `traefik.port` label.
If a container exposes multiple ports, Træfik may forward traffic to the wrong port.
Even if a container only exposes one port, you should always write configuration defensively and explicitly.
- Should you choose to enable the `exposedbydefault` flag in the `traefik.toml` configuration, be aware that all containers that are placed in the same network as Træfik will automatically be reachable from the outside world, for everyone and everyone to see.
- Should you choose to enable the `exposedByDefault` flag in the `traefik.toml` configuration, be aware that all containers that are placed in the same network as Træfik will automatically be reachable from the outside world, for anyone and everyone to see.
Usually, this is a bad idea.
- With the `traefik.frontend.auth.basic` label, it's possible for Træfik to provide a HTTP basic-auth challenge for the endpoints you provide the label for.
- Træfik has built-in support to automatically export [Prometheus](https://prometheus.io) metrics

View file

@ -89,7 +89,7 @@ This configuration allows generating Let's Encrypt certificates (thanks to `HTTP
Træfik generates these certificates when it starts and it needs to be restarted if new domains are added.
### OnHostRule option (with HTTP challenge)
### onHostRule option (with HTTP challenge)
```toml
[entryPoints]
@ -225,7 +225,7 @@ These variables are described [in this section](/configuration/acme/#provider).
More information about wildcard certificates are available [in this section](/configuration/acme/#wildcard-domain).
### OnHostRule option and provided certificates (with HTTP challenge)
### onHostRule option and provided certificates (with HTTP challenge)
```toml
[entryPoints]
@ -358,7 +358,7 @@ defaultEntryPoints = ["http"]
users = ["test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/", "test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"]
```
## Override the Traefik HTTP server IdleTimeout and/or throttle configurations from re-loading too quickly
## Override the Traefik HTTP server idleTimeout and/or throttle configurations from re-loading too quickly
```toml
providersThrottleDuration = "5s"

View file

@ -45,7 +45,7 @@ At last, we configure our Træfik instance to use both self-signed certificates.
defaultEntryPoints = ["https"]
# For secure connection on backend.local
RootCAs = [ "./backend.cert" ]
rootCAs = [ "./backend.cert" ]
[entryPoints]
[entryPoints.https]
@ -76,7 +76,7 @@ RootCAs = [ "./backend.cert" ]
```
!!! warning
With some backends, the server URLs use the IP, so you may need to configure `InsecureSkipVerify` instead of the `RootCAS` to activate HTTPS without hostname verification.
With some backends, the server URLs use the IP, so you may need to configure `insecureSkipVerify` instead of the `rootCAs` to activate HTTPS without hostname verification.
## Conclusion

View file

@ -398,7 +398,7 @@ It's possible to protect access to Træfik through basic authentication. (See th
### Creating the Secret
A. Use `htpasswd` to create a file containing the username and the base64-encoded password:
A. Use `htpasswd` to create a file containing the username and the MD5-encoded password:
```shell
htpasswd -c ./auth myusername

View file

@ -87,7 +87,7 @@ docker-machine ssh manager "docker service create \
--network traefik-net \
traefik \
--docker \
--docker.swarmmode \
--docker.swarmMode \
--docker.domain=traefik \
--docker.watch \
--api"
@ -101,7 +101,7 @@ Let's explain this command:
| `--constraint=node.role==manager` | we ask docker to schedule Træfik on a manager node. |
| `--mount type=bind,source=/var/run/docker.sock,target=/var/run/docker.sock` | we bind mount the docker socket where Træfik is scheduled to be able to speak to the daemon. |
| `--network traefik-net` | we attach the Træfik service (and thus the underlying container) to the `traefik-net` network. |
| `--docker` | enable docker backend, and `--docker.swarmmode` to enable the swarm mode on Træfik. |
| `--docker` | enable docker backend, and `--docker.swarmMode` to enable the swarm mode on Træfik. |
| `--api` | activate the webUI on port 8080 |

View file

@ -29,10 +29,13 @@ entryPoint = "api"
method = "drr"
[backends.backend.servers.server1]
url = "http://127.0.0.1:8081"
weight = 1
[backends.backend.servers.server2]
url = "http://127.0.0.1:8082"
weight = 1
[backends.backend.servers.server3]
url = "http://127.0.0.1:8083"
weight = 1
[frontends]
[frontends.frontend]
backend = "backend"

View file

@ -27,18 +27,24 @@ entryPoint = "api"
[backends.backend1]
[backends.backend1.servers.server1]
url = "http://127.0.0.1:8081"
weight = 1
[backends.backend2]
[backends.backend2.LoadBalancer]
method = "drr"
[backends.backend2.servers.server1]
url = "http://127.0.0.1:8082"
weight = 1
[backends.backend2.servers.server2]
url = "http://127.0.0.1:8083"
weight = 1
[frontends]
[frontends.frontend1]
backend = "backend1"
[frontends.frontend1.routes.test_1]
rule = "Path: /test1"
[frontends.frontend2]
backend = "backend2"
passHostHeader = true

View file

@ -16,7 +16,7 @@ email = "test@traefik.io"
storage = "/etc/traefik/conf/acme.json"
entryPoint = "https"
onDemand = false
OnHostRule = true
onHostRule = true
caServer = "http://traefik.boulder.com:4001/directory"
[acme.httpChallenge]
entryPoint="http"
@ -27,6 +27,6 @@ caServer = "http://traefik.boulder.com:4001/directory"
endpoint = "unix:///var/run/docker.sock"
domain = "traefik.localhost.com"
watch = true
exposedbydefault = false
exposedByDefault = false

View file

@ -13,7 +13,7 @@ defaultEntryPoints = ["http", "https"]
email = "test@traefik.io"
storage = "traefik/acme/account"
entryPoint = "https"
OnHostRule = true
onHostRule = true
caServer = "http://traefik.boulder.com:4001/directory"
[acme.httpChallenge]
entryPoint="http"
@ -25,4 +25,4 @@ entryPoint="http"
endpoint = "unix:///var/run/docker.sock"
domain = "localhost.com"
watch = true
exposedbydefault = false
exposedByDefault = false

View file

@ -195,6 +195,8 @@ func (s *AcmeSuite) retrieveAcmeCertificate(c *check.C, testCase AcmeTestCase) {
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer cmd.Process.Kill()
// A real file is needed to have the right mode on acme.json file
defer os.Remove("/tmp/acme.json")
backend := startTestServer("9010", http.StatusOK)
defer backend.Close()

View file

@ -12,10 +12,10 @@ defaultEntryPoints = ["http", "https"]
[acme]
email = "test@traefik.io"
storage = "/dev/null"
storage = "/tmp/acme.json"
entryPoint = "https"
onDemand = {{.OnDemand}}
OnHostRule = {{.OnHostRule}}
onHostRule = {{.OnHostRule}}
caServer = "http://{{.BoulderHost}}:4001/directory"
[acme.httpchallenge]
entrypoint="http"
@ -26,6 +26,7 @@ caServer = "http://{{.BoulderHost}}:4001/directory"
[backends.backend]
[backends.backend.servers.server1]
url = "http://127.0.0.1:9010"
weight = 1
[frontends]

View file

@ -11,10 +11,10 @@ defaultEntryPoints = ["http", "https"]
[acme]
email = "test@traefik.io"
storage = "/dev/null"
storage = "/tmp/acme.json"
entryPoint = "https"
onDemand = {{.OnDemand}}
OnHostRule = {{.OnHostRule}}
onHostRule = {{.OnHostRule}}
caServer = "http://{{.BoulderHost}}:4001/directory"
[acme.httpchallenge]
entrypoint="http"
@ -28,6 +28,7 @@ path="/traefik"
[backends.backend]
[backends.backend.servers.server1]
url = "http://127.0.0.1:9010"
weight = 1
[frontends]
[frontends.frontend]

View file

@ -14,10 +14,10 @@ defaultEntryPoints = ["http", "https"]
[acme]
email = "test@traefik.io"
storage = "/dev/null"
storage = "/tmp/acme.json"
entryPoint = "https"
onDemand = {{.OnDemand}}
OnHostRule = {{.OnHostRule}}
onHostRule = {{.OnHostRule}}
caServer = "http://{{.BoulderHost}}:4001/directory"
[acme.httpChallenge]
entryPoint="http"
@ -28,6 +28,7 @@ entryPoint="http"
[backends.backend]
[backends.backend.servers.server1]
url = "http://127.0.0.1:9010"
weight = 1
[frontends]

View file

@ -12,10 +12,10 @@ defaultEntryPoints = ["http", "https"]
[acme]
email = "test@traefik.io"
storage = "/dev/null"
storage = "/tmp/acme.json"
entryPoint = "https"
onDemand = {{.OnDemand}}
OnHostRule = {{.OnHostRule}}
onHostRule = {{.OnHostRule}}
caServer = "http://{{.BoulderHost}}:4001/directory"
[acme.httpChallenge]
entryPoint="http"

View file

@ -2,6 +2,7 @@
[backends.backend]
[backends.backend.servers.server1]
url = "http://127.0.0.1:9010"
weight = 1
[frontends]
[frontends.frontend]

View file

@ -14,9 +14,9 @@ defaultEntryPoints = ["http", "https"]
[acme]
email = "test@traefik.io"
storage = "/dev/null"
storage = "/tmp/acme.json"
entryPoint = "https"
OnHostRule = true
onHostRule = true
caServer = "http://{{.BoulderHost}}:4001/directory"
# No challenge defined
@ -26,6 +26,7 @@ caServer = "http://{{.BoulderHost}}:4001/directory"
[backends.backend]
[backends.backend.servers.server1]
url = "http://127.0.0.1:9010"
weight = 1
[frontends]

View file

@ -14,9 +14,9 @@ defaultEntryPoints = ["http", "https"]
[acme]
email = "test@traefik.io"
storage = "/dev/null"
storage = "/tmp/acme.json"
entryPoint = "https"
OnHostRule = true
onHostRule = true
caServer = "http://wrongurl:4001/directory"
[file]
@ -25,6 +25,7 @@ caServer = "http://wrongurl:4001/directory"
[backends.backend]
[backends.backend.servers.server1]
url = "http://127.0.0.1:9010"
weight = 1
[frontends]

View file

@ -14,4 +14,4 @@ logLevel = "DEBUG"
endpoint = "{{.DockerHost}}"
domain = "docker.localhost"
exposedbydefault = true
exposedByDefault = true

View file

@ -9,10 +9,10 @@ logLevel = "DEBUG"
address = ":8081"
[dynamodb]
AccessKeyID = "key"
SecretAccessKey = "secret"
Endpoint = "{{.DynamoURL}}"
Region = "us-east-1"
accessKeyID = "key"
secretAccessKey = "secret"
endpoint = "{{.DynamoURL}}"
region = "us-east-1"
[api]
entryPoint = "api"
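The renames in this fixture (AccessKeyID to accessKeyID and so on) only normalize the key style; assuming a BurntSushi-style TOML decoder like the one Traefik relies on, keys are matched to struct fields case-insensitively, so both spellings decode into the same fields. A small sketch:

    package main

    import (
        "fmt"

        "github.com/BurntSushi/toml"
    )

    type dynamoConfig struct {
        AccessKeyID string
        Region      string
    }

    func main() {
        var cfg dynamoConfig
        // Lower-camel-case keys still populate the exported struct fields.
        if _, err := toml.Decode("accessKeyID = \"key\"\nregion = \"us-east-1\"", &cfg); err != nil {
            fmt.Println("decode:", err)
            return
        }
        fmt.Printf("%+v\n", cfg) // {AccessKeyID:key Region:us-east-1}
    }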

View file

@ -11,17 +11,20 @@ logLevel = "DEBUG"
[backends.backend1]
[backends.backend1.servers.server1]
url = "http://{{.Server1}}:8989474"
weight = 1
[backends.error]
[backends.error.servers.error]
url = "http://{{.Server2}}:80"
weight = 1
[frontends]
[frontends.frontend1]
passHostHeader = true
backend = "backend1"
[frontends.frontend1.routes.test_1]
rule = "Host:test.local"
[frontends.frontend1.errors]
[frontends.frontend1.errors.networks]
status = ["500-502", "503-599"]
backend = "error"
query = "/50x.html"
[frontends.frontend1]
passHostHeader = true
backend = "backend1"
[frontends.frontend1.routes.test_1]
rule = "Host:test.local"
[frontends.frontend1.errors]
[frontends.frontend1.errors.networks]
status = ["500-502", "503-599"]
backend = "error"
query = "/50x.html"
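The errors block above maps status expressions such as "500-502" and "503-599" to an error backend and query. A hedged sketch (the helper name is illustrative, not the actual Traefik parser) of how such expressions can be expanded into inclusive ranges, with a single code like "503" treated as a one-element range:

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    func parseRanges(statuses []string) ([][2]int, error) {
        var ranges [][2]int
        for _, s := range statuses {
            parts := strings.SplitN(s, "-", 2)
            low, err := strconv.Atoi(parts[0])
            if err != nil {
                return nil, err
            }
            high := low // a single code such as "503" is a one-element range
            if len(parts) == 2 {
                if high, err = strconv.Atoi(parts[1]); err != nil {
                    return nil, err
                }
            }
            ranges = append(ranges, [2]int{low, high})
        }
        return ranges, nil
    }

    func main() {
        r, _ := parseRanges([]string{"500-502", "503-599"})
        fmt.Println(r) // [[500 502] [503 599]]
    }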

View file

@ -11,17 +11,20 @@ logLevel = "DEBUG"
[backends.backend1]
[backends.backend1.servers.server1]
url = "http://{{.Server1}}:80"
weight = 1
[backends.error]
[backends.error.servers.error]
url = "http://{{.Server2}}:80"
weight = 1
[frontends]
[frontends.frontend1]
passHostHeader = true
backend = "backend1"
[frontends.frontend1]
passHostHeader = true
backend = "backend1"
[frontends.frontend1.routes.test_1]
rule = "Host:test.local"
[frontends.frontend1.errors]
[frontends.frontend1.errors.networks]
status = ["500-502", "503-599"]
backend = "error"
query = "/50x.html"
[frontends.frontend1.errors.networks]
status = ["500-502", "503-599"]
backend = "error"
query = "/50x.html"

View file

@ -3,6 +3,7 @@
[backends.backend1]
[backends.backend1.servers.server1]
url = "http://172.17.0.2:80"
weight = 1
[frontends]
[frontends.frontend1]

View file

@ -3,6 +3,7 @@
[backends.backend2]
[backends.backend2.servers.server1]
url = "http://172.17.0.2:80"
weight = 1
[frontends]
[frontends.frontend2]

View file

@ -1,6 +1,6 @@
defaultEntryPoints = ["https"]
RootCAs = [ """{{ .CertContent }}""" ]
rootCAs = [ """{{ .CertContent }}""" ]
[entryPoints]
[entryPoints.https]
@ -19,6 +19,7 @@ RootCAs = [ """{{ .CertContent }}""" ]
[backends.backend1]
[backends.backend1.servers.server1]
url = "https://127.0.0.1:{{ .GRPCServerPort }}"
weight = 1
[frontends]
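The RootCAs to rootCAs rename above is again key-style normalization; the value itself is a PEM block that ends up in a certificate pool used to verify the self-signed gRPC backend certificate. A standard-library sketch (the PEM content is only a placeholder):

    package main

    import (
        "crypto/tls"
        "crypto/x509"
        "fmt"
    )

    func main() {
        certContent := []byte("-----BEGIN CERTIFICATE-----\n...placeholder...\n-----END CERTIFICATE-----")
        pool := x509.NewCertPool()
        ok := pool.AppendCertsFromPEM(certContent) // false here: the PEM above is only a placeholder
        cfg := &tls.Config{RootCAs: pool}
        fmt.Println("appended:", ok, "pool set:", cfg.RootCAs != nil)
    }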

View file

@ -1,6 +1,6 @@
defaultEntryPoints = ["https"]
InsecureSkipVerify = true
insecureSkipVerify = true
[entryPoints]
[entryPoints.https]
@ -19,6 +19,7 @@ InsecureSkipVerify = true
[backends.backend1]
[backends.backend1.servers.server1]
url = "https://127.0.0.1:{{ .GRPCServerPort }}"
weight = 1
[frontends]

View file

@ -20,8 +20,10 @@ logLevel = "DEBUG"
interval = "1s"
[backends.backend1.servers.server1]
url = "http://{{.Server1}}:80"
weight = 1
[backends.backend1.servers.server2]
url = "http://{{.Server2}}:80"
weight = 1
[frontends]
[frontends.frontend1]

View file

@ -20,8 +20,10 @@ logLevel = "DEBUG"
interval = "1s"
[backends.backend1.servers.server1]
url = "http://{{.Server1}}:80"
weight = 1
[backends.backend1.servers.server2]
url = "http://{{.Server2}}:80"
weight = 1
[frontends]
[frontends.frontend1]

View file

@ -17,6 +17,7 @@ logLevel = "DEBUG"
interval = "1s"
[backends.backend1.servers.server1]
url = "http://{{.Server1}}:81"
weight = 1
[frontends]
[frontends.frontend1]
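The health-check fixtures above configure interval = "1s"; conceptually, each backend server is probed on that interval and taken out of rotation while unhealthy. A simplified, standalone sketch (the probe URL and success criterion are assumptions, not Traefik's actual health-check engine):

    package main

    import (
        "fmt"
        "net/http"
        "time"
    )

    func main() {
        ticker := time.NewTicker(1 * time.Second) // matches interval = "1s"
        defer ticker.Stop()
        for i := 0; i < 3; i++ {
            <-ticker.C
            resp, err := http.Get("http://127.0.0.1:80/health") // assumed probe endpoint
            healthy := err == nil && resp.StatusCode == http.StatusOK
            if resp != nil {
                resp.Body.Close()
            }
            fmt.Println("healthy:", healthy)
        }
    }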

View file

@ -16,8 +16,10 @@ logLevel = "DEBUG"
interval = "1s"
[backends.backend1.servers.server1]
url = "http://{{.Server1}}:80"
weight = 1
[backends.backend1.servers.server2]
url = "http://{{.Server2}}:80"
weight = 1
[frontends]
[frontends.frontend1]

View file

@ -24,9 +24,11 @@ defaultEntryPoints = ["https"]
[backends.backend1]
[backends.backend1.servers.server1]
url = "http://127.0.0.1:9010"
weight = 1
[backends.backend2]
[backends.backend2.servers.server1]
url = "http://127.0.0.1:9020"
weight = 1
[frontends]
[frontends.frontend1]

View file

@ -23,9 +23,11 @@ defaultEntryPoints = ["https"]
[backends.backend1]
[backends.backend1.servers.server1]
url = "http://127.0.0.1:9010"
weight = 1
[backends.backend2]
[backends.backend2.servers.server1]
url = "http://127.0.0.1:9020"
weight = 1
[frontends]
[frontends.frontend1]

View file

@ -24,9 +24,11 @@ defaultEntryPoints = ["https"]
[backends.backend1]
[backends.backend1.servers.server1]
url = "http://127.0.0.1:9010"
weight = 1
[backends.backend2]
[backends.backend2.servers.server1]
url = "http://127.0.0.1:9020"
weight = 1
[frontends]
[frontends.frontend1]

View file

@ -2,9 +2,11 @@
[backends.backend1]
[backends.backend1.servers.server1]
url = "http://127.0.0.1:9010"
weight = 1
[backends.backend2]
[backends.backend2.servers.server1]
url = "http://127.0.0.1:9020"
weight = 1
[frontends]
[frontends.frontend1]

View file

@ -21,9 +21,11 @@ defaultEntryPoints = ["https"]
[backends.backend1]
[backends.backend1.servers.server1]
url = "http://127.0.0.1:9010"
weight = 1
[backends.backend2]
[backends.backend2.servers.server1]
url = "http://127.0.0.1:9020"
weight = 1
[frontends]
[frontends.frontend1]

View file

@ -3,7 +3,7 @@ logLevel = "DEBUG"
defaultEntryPoints = ["http"]
# Use certificate in net/internal/testcert.go
RootCAs = [ """
rootCAs = [ """
-----BEGIN CERTIFICATE-----
MIICEzCCAXygAwIBAgIQMIMChMLGrR+QvmQvpwAU6zANBgkqhkiG9w0BAQsFADAS
MRAwDgYDVQQKEwdBY21lIENvMCAXDTcwMDEwMTAwMDAwMFoYDzIwODQwMTI5MTYw
@ -32,6 +32,7 @@ fblo6RBxUQ==
[backends.backend1]
[backends.backend1.servers.server1]
url = "{{ .BackendHost }}"
weight = 1
[frontends]
[frontends.frontend1]

View file

@ -3,7 +3,7 @@ logLevel = "DEBUG"
defaultEntryPoints = ["http"]
# Use certificate in net/internal/testcert.go
RootCAs = [ "fixtures/https/rootcas/local.crt"]
rootCAs = [ "fixtures/https/rootcas/local.crt"]
[entryPoints]
[entryPoints.http]
@ -17,6 +17,8 @@ RootCAs = [ "fixtures/https/rootcas/local.crt"]
[backends.backend1]
[backends.backend1.servers.server1]
url = "{{ .BackendHost }}"
weight = 1
[frontends]
[frontends.frontend1]
backend = "backend1"

View file

@ -27,12 +27,14 @@ entryPoint = "api"
################################################################
# rules
################################################################
[backends]
[backends.backend1]
[backends.backend1.servers.server1]
url = "http://127.0.0.1:8081"
[frontends]
[frontends.frontend1]
backend = "backend1"
[frontends.frontend1.routes.test_1]
rule = "Path: /test1"
[backends]
[backends.backend1]
[backends.backend1.servers.server1]
url = "http://127.0.0.1:8081"
weight = 1
[frontends]
[frontends.frontend1]
backend = "backend1"
[frontends.frontend1.routes.test_1]
rule = "Path: /test1"

View file

@ -11,7 +11,7 @@ debug=true
[docker]
endpoint = "unix:///var/run/docker.sock"
watch = true
exposedbydefault = false
exposedByDefault = false
[file]
[frontends]
@ -19,7 +19,9 @@ exposedbydefault = false
backend = "backend-test"
[frontends.frontend-1.routes.test_1]
rule = "PathPrefix:/file"
[backends]
[backends.backend-test]
[backends.backend-test.servers.website]
url = "http://{{ .IP }}"
url = "http://{{ .IP }}"
weight = 1

View file

@ -11,16 +11,16 @@ defaultEntryPoints = ["http", "https"]
[acme]
email = "test@traefik.io"
storage = "/dev/null"
entryPoint = "https"
onDemand = {{.OnDemand}}
OnHostRule = {{.OnHostRule}}
caServer = "http://{{.BoulderHost}}:4001/directory"
[acme.httpChallenge]
entryPoint="http"
[[acme.domains]]
main = "traefik.acme.wtf"
email = "test@traefik.io"
storage = "/tmp/acme.json"
entryPoint = "https"
onDemand = {{.OnDemand}}
onHostRule = {{.OnHostRule}}
caServer = "http://{{.BoulderHost}}:4001/directory"
[acme.httpChallenge]
entryPoint="http"
[[acme.domains]]
main = "traefik.acme.wtf"
[api]
@ -31,6 +31,7 @@ main = "traefik.acme.wtf"
[backends.backend]
[backends.backend.servers.server1]
url = "http://127.0.0.1:9010"
weight = 1
[frontends]
[frontends.frontend]

View file

@ -11,17 +11,17 @@ defaultEntryPoints = ["http", "https"]
[acme]
email = "test@traefik.io"
storage = "/dev/null"
entryPoint = "https"
onDemand = false
OnHostRule = false
caServer = "http://{{.BoulderHost}}:4001/directory"
[acme.httpChallenge]
entryPoint="http"
[[acme.domains]]
main = "acme.wtf"
sans = [ "traefik.acme.wtf" ]
email = "test@traefik.io"
storage = "/tmp/acme.json"
entryPoint = "https"
onDemand = false
onHostRule = false
caServer = "http://{{.BoulderHost}}:4001/directory"
[acme.httpChallenge]
entryPoint="http"
[[acme.domains]]
main = "acme.wtf"
sans = [ "traefik.acme.wtf" ]
[api]
@ -32,6 +32,7 @@ sans = [ "traefik.acme.wtf" ]
[backends.backend]
[backends.backend.servers.server1]
url = "http://127.0.0.1:9010"
weight = 1
[frontends]
[frontends.frontend]

View file

@ -11,14 +11,14 @@ defaultEntryPoints = ["http", "https"]
[acme]
email = "test@traefik.io"
storage = "/dev/null"
entryPoint = "https"
onDemand = {{.OnDemand}}
OnHostRule = {{.OnHostRule}}
caServer = "http://{{.BoulderHost}}:4001/directory"
[acme.httpChallenge]
entryPoint="http"
email = "test@traefik.io"
storage = "/tmp/acme.jsonl"
entryPoint = "https"
onDemand = {{.OnDemand}}
onHostRule = {{.OnHostRule}}
caServer = "http://{{.BoulderHost}}:4001/directory"
[acme.httpChallenge]
entryPoint="http"
[api]
@ -28,6 +28,7 @@ entryPoint="http"
[backends.backend]
[backends.backend.servers.server1]
url = "http://127.0.0.1:9010"
weight = 1
[frontends]
[frontends.frontend]

View file

@ -2,22 +2,23 @@ logLevel = "DEBUG"
defaultEntryPoints = ["http"]
[entryPoints]
[entryPoints.http]
address = ":8000"
[entryPoints.http.proxyProtocol]
trustedIPs = ["{{.HaproxyIP}}"]
[entryPoints.http]
address = ":8000"
[entryPoints.http.proxyProtocol]
trustedIPs = ["{{.HaproxyIP}}"]
[api]
[file]
[backends]
[backends.backend1]
[backends.backend1.servers.server1]
url = "http://{{.WhoamiIP}}"
[backends.backend1]
[backends.backend1.servers.server1]
url = "http://{{.WhoamiIP}}"
weight = 1
[frontends]
[frontends.frontend1]
backend = "backend1"
[frontends.frontend1.routes.test_1]
rule = "Path:/whoami"
[frontends.frontend1]
backend = "backend1"
[frontends.frontend1.routes.test_1]
rule = "Path:/whoami"

View file

@ -2,22 +2,23 @@ logLevel = "DEBUG"
defaultEntryPoints = ["http"]
[entryPoints]
[entryPoints.http]
address = ":8000"
[entryPoints.http.proxyProtocol]
trustedIPs = ["1.2.3.4"]
[entryPoints.http]
address = ":8000"
[entryPoints.http.proxyProtocol]
trustedIPs = ["1.2.3.4"]
[api]
[file]
[backends]
[backends.backend1]
[backends.backend1.servers.server1]
url = "http://{{.WhoamiIP}}"
[backends.backend1]
[backends.backend1.servers.server1]
url = "http://{{.WhoamiIP}}"
weight = 1
[frontends]
[frontends.frontend1]
backend = "backend1"
[frontends.frontend1.routes.test_1]
rule = "Path:/whoami"
[frontends.frontend1]
backend = "backend1"
[frontends.frontend1.routes.test_1]
rule = "Path:/whoami"

View file

@ -12,19 +12,21 @@ logLevel = "DEBUG"
[backends.backend1]
[backends.backend1.servers.server1]
url = "http://{{.Server1}}:80"
weight = 1
[frontends]
[frontends.frontend1]
passHostHeader = true
backend = "backend1"
[frontends.frontend1]
passHostHeader = true
backend = "backend1"
[frontends.frontend1.routes.test_1]
rule = "Path:/"
[frontends.frontend1.ratelimit]
extractorfunc = "client.ip"
[frontends.frontend1.ratelimit.rateset.rateset1]
period = "60s"
average = 4
burst = 5
[frontends.frontend1.ratelimit.rateset.rateset2]
period = "3s"
average = 1
burst = 2
[frontends.frontend1.ratelimit.rateset.rateset1]
period = "60s"
average = 4
burst = 5
[frontends.frontend1.ratelimit.rateset.rateset2]
period = "3s"
average = 1
burst = 2
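The rateset above expresses a limit as period, average and burst: rateset2 allows on average one request per 3 seconds with bursts of up to 2. A hedged illustration using golang.org/x/time/rate (an analogy, not the limiter Traefik actually wires in):

    package main

    import (
        "fmt"
        "time"

        "golang.org/x/time/rate"
    )

    func main() {
        period := 3 * time.Second
        average, burst := 1, 2
        limiter := rate.NewLimiter(rate.Limit(float64(average)/period.Seconds()), burst)
        for i := 0; i < 4; i++ {
            fmt.Printf("request %d allowed: %v\n", i, limiter.Allow())
        }
        // Typically the first `burst` requests pass and the rest are throttled.
    }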

View file

@ -17,6 +17,7 @@ logLevel = "DEBUG"
[backends.backend]
[backends.backend.servers.server]
url = "{{.Server}}"
weight = 1
[frontends]
[frontends.frontend]

View file

@ -15,8 +15,10 @@ logLevel = "DEBUG"
[backends.backend1]
[backends.backend1.servers.server1]
url = "http://{{.WhoamiEndpoint}}:8080" # not valid
weight = 1
[backends.backend1.servers.server2]
url = "http://{{.WhoamiEndpoint}}:80"
weight = 1
[frontends]
[frontends.frontend1]

View file

@ -5,26 +5,29 @@ debug=true
address = ":8000"
[api]
[file]
[backends]
[backends.backend1]
[backends.backend1.servers.server1]
url = "{{ .Server1 }}"
[backends]
[backends.backend1]
[backends.backend1.servers.server1]
url = "{{ .Server1 }}"
weight = 1
[backends.backend2]
[backends.backend2.servers.server1]
url = "{{ .Server2 }}"
[backends.backend2.servers.server1]
url = "{{ .Server2 }}"
weight = 1
[frontends]
[frontends.frontend1]
entrypoints=["http"]
[frontends]
[frontends.frontend1]
entrypoints=["http"]
backend = "backend1"
[frontends.frontend1.routes.test_1]
rule = "PathPrefix:/whoami"
backend = "backend1"
[frontends.frontend1.routes.test_1]
rule = "PathPrefix:/whoami"
[frontends.frontend2]
backend = "backend2"
entrypoints=["traefik"]
[frontends.frontend2.routes.test_1]
rule = "PathPrefix:/whoami"
[frontends.frontend2]
backend = "backend2"
entrypoints=["traefik"]
[frontends.frontend2.routes.test_1]
rule = "PathPrefix:/whoami"

View file

@ -22,9 +22,11 @@ responseHeaderTimeout = "300ms"
# Non-routable IP address that should always deliver a dial timeout.
# See: https://stackoverflow.com/questions/100841/artificially-create-a-connection-timeout-error#answer-904609
url = "http://50.255.255.1"
weight = 1
[backends.backend2]
[backends.backend2.servers.server2]
url = "http://{{.TimeoutEndpoint}}:9000"
weight = 1
[frontends]
[frontends.frontend1]
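The fixture above pairs a responseHeaderTimeout of 300ms with a deliberately non-routable backend (50.255.255.1) so the dial itself times out. A standard-library sketch of the two knobs involved (illustrative; not Traefik's transport setup):

    package main

    import (
        "fmt"
        "net"
        "net/http"
        "time"
    )

    func main() {
        transport := &http.Transport{
            DialContext:           (&net.Dialer{Timeout: 300 * time.Millisecond}).DialContext,
            ResponseHeaderTimeout: 300 * time.Millisecond,
        }
        client := &http.Client{Transport: transport}
        _, err := client.Get("http://50.255.255.1") // expected to fail with a dial timeout
        fmt.Println(err)
    }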

View file

@ -6,32 +6,38 @@ debug = true
[api]
[entryPoints]
[entryPoints.http]
[entryPoints.http]
address = ":8000"
[tracing]
backend = "{{.TracingBackend}}"
servicename = "tracing"
backend = "{{.TracingBackend}}"
servicename = "tracing"
[tracing.zipkin]
HTTPEndpoint = "http://{{.ZipkinIP}}:9411/api/v1/spans"
debug = true
httpEndpoint = "http://{{.ZipkinIP}}:9411/api/v1/spans"
debug = true
[tracing.jaeger]
SamplingType = "const"
SamplingParam = 1.0
samplingType = "const"
samplingParam = 1.0
[retry]
attempts = 3
attempts = 3
[file]
[backends]
[backends.backend1]
[backends.backend1.servers.server-ratelimit]
url = "http://{{.WhoAmiIP}}:{{.WhoAmiPort}}"
weight = 1
[backends.backend2]
[backends.backend2.servers.server-retry]
url = "http://{{.WhoAmiIP}}:{{.WhoAmiPort}}"
weight = 1
[backends.backend3]
[backends.backend3.servers.server-auth]
url = "http://{{.WhoAmiIP}}:{{.WhoAmiPort}}"
weight = 1
[frontends]
[frontends.frontend1]
passHostHeader = true
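In the tracing block above, samplingType = "const" with samplingParam = 1.0 means every request is sampled (a param of 0.0 would sample none). A dependency-free sketch of that sampler idea:

    package main

    import "fmt"

    func constSampler(param float64) func() bool {
        sample := param >= 1.0
        return func() bool { return sample }
    }

    func main() {
        always := constSampler(1.0)
        never := constSampler(0.0)
        fmt.Println(always(), never()) // true false
    }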

View file

@ -6,7 +6,6 @@ logLevel = "DEBUG"
[entryPoints.http]
address = ":8000"
[api]
[file]
@ -15,6 +14,7 @@ logLevel = "DEBUG"
[backends.backend1]
[backends.backend1.servers.server1]
url = "{{ .WebsocketServer }}"
weight = 1
[frontends]
[frontends.frontend1]

View file

@ -1,7 +1,7 @@
defaultEntryPoints = ["wss"]
logLevel = "DEBUG"
InsecureSkipVerify=true
insecureSkipVerify=true
[entryPoints]
[entryPoints.wss]
@ -19,6 +19,7 @@ InsecureSkipVerify=true
[backends.backend1]
[backends.backend1.servers.server1]
url = "{{ .WebsocketServer }}"
weight = 1
[frontends]
[frontends.frontend1]
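insecureSkipVerify = true lets the proxy talk to the self-signed wss test backend without validating its certificate. The standard-library equivalent (for illustration only; never enable this in production) looks like:

    package main

    import (
        "crypto/tls"
        "fmt"
        "net/http"
    )

    func main() {
        transport := &http.Transport{
            TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, // accept any backend certificate
        }
        client := &http.Client{Transport: transport}
        resp, err := client.Get("https://self-signed.example.test") // hypothetical self-signed backend
        fmt.Println(resp, err)
    }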

View file

@ -1,174 +0,0 @@
package middlewares
import (
"bufio"
"bytes"
"net"
"net/http"
"strconv"
"strings"
"github.com/containous/traefik/log"
"github.com/containous/traefik/types"
"github.com/vulcand/oxy/forward"
"github.com/vulcand/oxy/utils"
)
// Compile time validation that the response recorder implements http interfaces correctly.
var _ Stateful = &errorPagesResponseRecorderWithCloseNotify{}
//ErrorPagesHandler is a middleware that provides the custom error pages
type ErrorPagesHandler struct {
HTTPCodeRanges types.HTTPCodeRanges
BackendURL string
errorPageForwarder *forward.Forwarder
}
//NewErrorPagesHandler initializes the utils.ErrorHandler for the custom error pages
func NewErrorPagesHandler(errorPage *types.ErrorPage, backendURL string) (*ErrorPagesHandler, error) {
fwd, err := forward.New()
if err != nil {
return nil, err
}
httpCodeRanges, err := types.NewHTTPCodeRanges(errorPage.Status)
if err != nil {
return nil, err
}
return &ErrorPagesHandler{
HTTPCodeRanges: httpCodeRanges,
BackendURL: backendURL + errorPage.Query,
errorPageForwarder: fwd},
nil
}
func (ep *ErrorPagesHandler) ServeHTTP(w http.ResponseWriter, req *http.Request, next http.HandlerFunc) {
recorder := newErrorPagesResponseRecorder(w)
next.ServeHTTP(recorder, req)
w.WriteHeader(recorder.GetCode())
//check the recorder code against the configured http status code ranges
for _, block := range ep.HTTPCodeRanges {
if recorder.GetCode() >= block[0] && recorder.GetCode() <= block[1] {
log.Errorf("Caught HTTP Status Code %d, returning error page", recorder.GetCode())
finalURL := strings.Replace(ep.BackendURL, "{status}", strconv.Itoa(recorder.GetCode()), -1)
if newReq, err := http.NewRequest(http.MethodGet, finalURL, nil); err != nil {
w.Write([]byte(http.StatusText(recorder.GetCode())))
} else {
ep.errorPageForwarder.ServeHTTP(w, newReq)
}
return
}
}
//did not catch a configured status code so proceed with the request
utils.CopyHeaders(w.Header(), recorder.Header())
w.Write(recorder.GetBody().Bytes())
}
type errorPagesResponseRecorder interface {
http.ResponseWriter
http.Flusher
GetCode() int
GetBody() *bytes.Buffer
IsStreamingResponseStarted() bool
}
// newErrorPagesResponseRecorder returns an initialized responseRecorder.
func newErrorPagesResponseRecorder(rw http.ResponseWriter) errorPagesResponseRecorder {
recorder := &errorPagesResponseRecorderWithoutCloseNotify{
HeaderMap: make(http.Header),
Body: new(bytes.Buffer),
Code: http.StatusOK,
responseWriter: rw,
}
if _, ok := rw.(http.CloseNotifier); ok {
return &errorPagesResponseRecorderWithCloseNotify{recorder}
}
return recorder
}
// errorPagesResponseRecorderWithoutCloseNotify is an implementation of http.ResponseWriter that
// records its mutations for later inspection.
type errorPagesResponseRecorderWithoutCloseNotify struct {
Code int // the HTTP response code from WriteHeader
HeaderMap http.Header // the HTTP response headers
Body *bytes.Buffer // if non-nil, the bytes.Buffer to append written data to
responseWriter http.ResponseWriter
err error
streamingResponseStarted bool
}
type errorPagesResponseRecorderWithCloseNotify struct {
*errorPagesResponseRecorderWithoutCloseNotify
}
// CloseNotify returns a channel that receives at most a
// single value (true) when the client connection has gone
// away.
func (rw *errorPagesResponseRecorderWithCloseNotify) CloseNotify() <-chan bool {
return rw.responseWriter.(http.CloseNotifier).CloseNotify()
}
// Header returns the response headers.
func (rw *errorPagesResponseRecorderWithoutCloseNotify) Header() http.Header {
m := rw.HeaderMap
if m == nil {
m = make(http.Header)
rw.HeaderMap = m
}
return m
}
func (rw *errorPagesResponseRecorderWithoutCloseNotify) GetCode() int {
return rw.Code
}
func (rw *errorPagesResponseRecorderWithoutCloseNotify) GetBody() *bytes.Buffer {
return rw.Body
}
func (rw *errorPagesResponseRecorderWithoutCloseNotify) IsStreamingResponseStarted() bool {
return rw.streamingResponseStarted
}
// Write always succeeds and writes to rw.Body, if not nil.
func (rw *errorPagesResponseRecorderWithoutCloseNotify) Write(buf []byte) (int, error) {
if rw.err != nil {
return 0, rw.err
}
return rw.Body.Write(buf)
}
// WriteHeader sets rw.Code.
func (rw *errorPagesResponseRecorderWithoutCloseNotify) WriteHeader(code int) {
rw.Code = code
}
// Hijack hijacks the connection
func (rw *errorPagesResponseRecorderWithoutCloseNotify) Hijack() (net.Conn, *bufio.ReadWriter, error) {
return rw.responseWriter.(http.Hijacker).Hijack()
}
// Flush sends any buffered data to the client.
func (rw *errorPagesResponseRecorderWithoutCloseNotify) Flush() {
if !rw.streamingResponseStarted {
utils.CopyHeaders(rw.responseWriter.Header(), rw.Header())
rw.responseWriter.WriteHeader(rw.Code)
rw.streamingResponseStarted = true
}
_, err := rw.responseWriter.Write(rw.Body.Bytes())
if err != nil {
log.Errorf("Error writing response in responseRecorder: %s", err)
rw.err = err
}
rw.Body.Reset()
flusher, ok := rw.responseWriter.(http.Flusher)
if ok {
flusher.Flush()
}
}

View file

@ -1,202 +0,0 @@
package middlewares
import (
"fmt"
"net/http"
"net/http/httptest"
"strconv"
"testing"
"github.com/containous/traefik/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/urfave/negroni"
)
func TestErrorPage(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, "Test Server")
}))
defer ts.Close()
testErrorPage := &types.ErrorPage{Backend: "error", Query: "/test", Status: []string{"500-501", "503-599"}}
testHandler, err := NewErrorPagesHandler(testErrorPage, ts.URL)
require.NoError(t, err)
assert.Equal(t, testHandler.BackendURL, ts.URL+"/test", "Should be equal")
recorder := httptest.NewRecorder()
req, err := http.NewRequest(http.MethodGet, ts.URL+"/test", nil)
require.NoError(t, err)
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, "traefik")
})
n := negroni.New()
n.Use(testHandler)
n.UseHandler(handler)
n.ServeHTTP(recorder, req)
assert.Equal(t, http.StatusOK, recorder.Code, "HTTP status")
assert.Contains(t, recorder.Body.String(), "traefik")
// ----
handler500 := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusInternalServerError)
fmt.Fprintln(w, "oops")
})
recorder500 := httptest.NewRecorder()
n500 := negroni.New()
n500.Use(testHandler)
n500.UseHandler(handler500)
n500.ServeHTTP(recorder500, req)
assert.Equal(t, http.StatusInternalServerError, recorder500.Code, "HTTP status Internal Server Error")
assert.Contains(t, recorder500.Body.String(), "Test Server")
assert.NotContains(t, recorder500.Body.String(), "oops", "Should not return the oops page")
handler502 := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusBadGateway)
fmt.Fprintln(w, "oops")
})
recorder502 := httptest.NewRecorder()
n502 := negroni.New()
n502.Use(testHandler)
n502.UseHandler(handler502)
n502.ServeHTTP(recorder502, req)
assert.Equal(t, http.StatusBadGateway, recorder502.Code, "HTTP status Bad Gateway")
assert.Contains(t, recorder502.Body.String(), "oops")
assert.NotContains(t, recorder502.Body.String(), "Test Server", "Should return the oops page since we have not configured the 502 code")
}
func TestErrorPageQuery(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.URL.RequestURI() == "/"+strconv.Itoa(503) {
fmt.Fprintln(w, "503 Test Server")
} else {
fmt.Fprintln(w, "Failed")
}
}))
defer ts.Close()
testErrorPage := &types.ErrorPage{Backend: "error", Query: "/{status}", Status: []string{"503-503"}}
testHandler, err := NewErrorPagesHandler(testErrorPage, ts.URL)
require.NoError(t, err)
assert.Equal(t, testHandler.BackendURL, ts.URL+"/{status}", "Should be equal")
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusServiceUnavailable)
fmt.Fprintln(w, "oops")
})
recorder := httptest.NewRecorder()
req, err := http.NewRequest(http.MethodGet, ts.URL+"/test", nil)
require.NoError(t, err)
n := negroni.New()
n.Use(testHandler)
n.UseHandler(handler)
n.ServeHTTP(recorder, req)
assert.Equal(t, http.StatusServiceUnavailable, recorder.Code, "HTTP status Service Unavailable")
assert.Contains(t, recorder.Body.String(), "503 Test Server")
assert.NotContains(t, recorder.Body.String(), "oops", "Should not return the oops page")
}
func TestErrorPageSingleCode(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.URL.RequestURI() == "/"+strconv.Itoa(503) {
fmt.Fprintln(w, "503 Test Server")
} else {
fmt.Fprintln(w, "Failed")
}
}))
defer ts.Close()
testErrorPage := &types.ErrorPage{Backend: "error", Query: "/{status}", Status: []string{"503"}}
testHandler, err := NewErrorPagesHandler(testErrorPage, ts.URL)
require.NoError(t, err)
assert.Equal(t, testHandler.BackendURL, ts.URL+"/{status}", "Should be equal")
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusServiceUnavailable)
fmt.Fprintln(w, "oops")
})
recorder := httptest.NewRecorder()
req, err := http.NewRequest(http.MethodGet, ts.URL+"/test", nil)
require.NoError(t, err)
n := negroni.New()
n.Use(testHandler)
n.UseHandler(handler)
n.ServeHTTP(recorder, req)
assert.Equal(t, http.StatusServiceUnavailable, recorder.Code, "HTTP status Service Unavailable")
assert.Contains(t, recorder.Body.String(), "503 Test Server")
assert.NotContains(t, recorder.Body.String(), "oops", "Should not return the oops page")
}
func TestNewErrorPagesResponseRecorder(t *testing.T) {
testCases := []struct {
desc string
rw http.ResponseWriter
expected http.ResponseWriter
}{
{
desc: "Without Close Notify",
rw: httptest.NewRecorder(),
expected: &errorPagesResponseRecorderWithoutCloseNotify{},
},
{
desc: "With Close Notify",
rw: &mockRWCloseNotify{},
expected: &errorPagesResponseRecorderWithCloseNotify{},
},
}
for _, test := range testCases {
test := test
t.Run(test.desc, func(t *testing.T) {
t.Parallel()
rec := newErrorPagesResponseRecorder(test.rw)
assert.IsType(t, rec, test.expected)
})
}
}
type mockRWCloseNotify struct{}
func (m *mockRWCloseNotify) CloseNotify() <-chan bool {
panic("implement me")
}
func (m *mockRWCloseNotify) Header() http.Header {
panic("implement me")
}
func (m *mockRWCloseNotify) Write([]byte) (int, error) {
panic("implement me")
}
func (m *mockRWCloseNotify) WriteHeader(int) {
panic("implement me")
}

View file

@ -0,0 +1,205 @@
package errorpages
import (
"bufio"
"bytes"
"net"
"net/http"
"strconv"
"strings"
"github.com/containous/traefik/log"
"github.com/containous/traefik/middlewares"
"github.com/containous/traefik/types"
"github.com/pkg/errors"
"github.com/vulcand/oxy/forward"
"github.com/vulcand/oxy/utils"
)
// Compile time validation that the response recorder implements http interfaces correctly.
var _ middlewares.Stateful = &responseRecorderWithCloseNotify{}
// Handler is a middleware that provides the custom error pages
type Handler struct {
BackendName string
backendHandler http.Handler
httpCodeRanges types.HTTPCodeRanges
backendURL string
backendQuery string
FallbackURL string // Deprecated
}
// NewHandler initializes the utils.ErrorHandler for the custom error pages
func NewHandler(errorPage *types.ErrorPage, backendName string) (*Handler, error) {
if len(backendName) == 0 {
return nil, errors.New("error pages: backend name is mandatory ")
}
httpCodeRanges, err := types.NewHTTPCodeRanges(errorPage.Status)
if err != nil {
return nil, err
}
return &Handler{
BackendName: backendName,
httpCodeRanges: httpCodeRanges,
backendQuery: errorPage.Query,
backendURL: "http://0.0.0.0",
}, nil
}
// PostLoad adds backend handler if available
func (h *Handler) PostLoad(backendHandler http.Handler) error {
if backendHandler == nil {
fwd, err := forward.New()
if err != nil {
return err
}
h.backendHandler = fwd
h.backendURL = h.FallbackURL
} else {
h.backendHandler = backendHandler
}
return nil
}
func (h *Handler) ServeHTTP(w http.ResponseWriter, req *http.Request, next http.HandlerFunc) {
if h.backendHandler == nil {
log.Error("Error pages: no backend handler.")
next.ServeHTTP(w, req)
return
}
recorder := newResponseRecorder(w)
next.ServeHTTP(recorder, req)
w.WriteHeader(recorder.GetCode())
// check the recorder code against the configured http status code ranges
for _, block := range h.httpCodeRanges {
if recorder.GetCode() >= block[0] && recorder.GetCode() <= block[1] {
log.Errorf("Caught HTTP Status Code %d, returning error page", recorder.GetCode())
var query string
if len(h.backendQuery) > 0 {
query = "/" + strings.TrimPrefix(h.backendQuery, "/")
query = strings.Replace(query, "{status}", strconv.Itoa(recorder.GetCode()), -1)
}
if newReq, err := http.NewRequest(http.MethodGet, h.backendURL+query, nil); err != nil {
w.Write([]byte(http.StatusText(recorder.GetCode())))
} else {
h.backendHandler.ServeHTTP(w, newReq)
}
return
}
}
// did not catch a configured status code so proceed with the request
utils.CopyHeaders(w.Header(), recorder.Header())
w.Write(recorder.GetBody().Bytes())
}
type responseRecorder interface {
http.ResponseWriter
http.Flusher
GetCode() int
GetBody() *bytes.Buffer
IsStreamingResponseStarted() bool
}
// newResponseRecorder returns an initialized responseRecorder.
func newResponseRecorder(rw http.ResponseWriter) responseRecorder {
recorder := &responseRecorderWithoutCloseNotify{
HeaderMap: make(http.Header),
Body: new(bytes.Buffer),
Code: http.StatusOK,
responseWriter: rw,
}
if _, ok := rw.(http.CloseNotifier); ok {
return &responseRecorderWithCloseNotify{recorder}
}
return recorder
}
// responseRecorderWithoutCloseNotify is an implementation of http.ResponseWriter that
// records its mutations for later inspection.
type responseRecorderWithoutCloseNotify struct {
Code int // the HTTP response code from WriteHeader
HeaderMap http.Header // the HTTP response headers
Body *bytes.Buffer // if non-nil, the bytes.Buffer to append written data to
responseWriter http.ResponseWriter
err error
streamingResponseStarted bool
}
type responseRecorderWithCloseNotify struct {
*responseRecorderWithoutCloseNotify
}
// CloseNotify returns a channel that receives at most a
// single value (true) when the client connection has gone away.
func (rw *responseRecorderWithCloseNotify) CloseNotify() <-chan bool {
return rw.responseWriter.(http.CloseNotifier).CloseNotify()
}
// Header returns the response headers.
func (rw *responseRecorderWithoutCloseNotify) Header() http.Header {
if rw.HeaderMap == nil {
rw.HeaderMap = make(http.Header)
}
return rw.HeaderMap
}
func (rw *responseRecorderWithoutCloseNotify) GetCode() int {
return rw.Code
}
func (rw *responseRecorderWithoutCloseNotify) GetBody() *bytes.Buffer {
return rw.Body
}
func (rw *responseRecorderWithoutCloseNotify) IsStreamingResponseStarted() bool {
return rw.streamingResponseStarted
}
// Write always succeeds and writes to rw.Body, if not nil.
func (rw *responseRecorderWithoutCloseNotify) Write(buf []byte) (int, error) {
if rw.err != nil {
return 0, rw.err
}
return rw.Body.Write(buf)
}
// WriteHeader sets rw.Code.
func (rw *responseRecorderWithoutCloseNotify) WriteHeader(code int) {
rw.Code = code
}
// Hijack hijacks the connection
func (rw *responseRecorderWithoutCloseNotify) Hijack() (net.Conn, *bufio.ReadWriter, error) {
return rw.responseWriter.(http.Hijacker).Hijack()
}
// Flush sends any buffered data to the client.
func (rw *responseRecorderWithoutCloseNotify) Flush() {
if !rw.streamingResponseStarted {
utils.CopyHeaders(rw.responseWriter.Header(), rw.Header())
rw.responseWriter.WriteHeader(rw.Code)
rw.streamingResponseStarted = true
}
_, err := rw.responseWriter.Write(rw.Body.Bytes())
if err != nil {
log.Errorf("Error writing response in responseRecorder: %s", err)
rw.err = err
}
rw.Body.Reset()
if flusher, ok := rw.responseWriter.(http.Flusher); ok {
flusher.Flush()
}
}
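A short usage sketch distilled from the handler above and mirroring the tests that follow (the errorpages import path is an assumption based on the package name): the middleware buffers the upstream response and, when the status code falls into a configured range, substitutes the page served by the error backend.

    package main

    import (
        "fmt"
        "log"
        "net/http"
        "net/http/httptest"

        "github.com/containous/traefik/middlewares/errorpages"
        "github.com/containous/traefik/types"
        "github.com/urfave/negroni"
    )

    func main() {
        errorPage := &types.ErrorPage{Backend: "error", Query: "/{status}", Status: []string{"500-599"}}
        handler, err := errorpages.NewHandler(errorPage, "backend-error")
        if err != nil {
            log.Fatal(err)
        }
        // Inject the error backend the way the server does after configuration load.
        handler.PostLoad(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            fmt.Fprintln(w, "custom error page for", r.URL.Path)
        }))

        n := negroni.New()
        n.Use(handler)
        n.UseHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            w.WriteHeader(http.StatusInternalServerError) // upstream failure caught by the middleware
        }))

        rec := httptest.NewRecorder()
        n.ServeHTTP(rec, httptest.NewRequest(http.MethodGet, "http://localhost/", nil))
        fmt.Println(rec.Code, rec.Body.String()) // 500 custom error page for /500
    }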

View file

@ -0,0 +1,383 @@
package errorpages
import (
"fmt"
"net/http"
"net/http/httptest"
"strconv"
"testing"
"github.com/containous/traefik/testhelpers"
"github.com/containous/traefik/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/urfave/negroni"
)
func TestHandler(t *testing.T) {
testCases := []struct {
desc string
errorPage *types.ErrorPage
backendCode int
backendErrorHandler http.HandlerFunc
validate func(t *testing.T, recorder *httptest.ResponseRecorder)
}{
{
desc: "no error",
errorPage: &types.ErrorPage{Backend: "error", Query: "/test", Status: []string{"500-501", "503-599"}},
backendCode: http.StatusOK,
backendErrorHandler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, "My error page.")
}),
validate: func(t *testing.T, recorder *httptest.ResponseRecorder) {
assert.Equal(t, http.StatusOK, recorder.Code, "HTTP status")
assert.Contains(t, recorder.Body.String(), http.StatusText(http.StatusOK))
},
},
{
desc: "in the range",
errorPage: &types.ErrorPage{Backend: "error", Query: "/test", Status: []string{"500-501", "503-599"}},
backendCode: http.StatusInternalServerError,
backendErrorHandler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, "My error page.")
}),
validate: func(t *testing.T, recorder *httptest.ResponseRecorder) {
assert.Equal(t, http.StatusInternalServerError, recorder.Code, "HTTP status")
assert.Contains(t, recorder.Body.String(), "My error page.")
assert.NotContains(t, recorder.Body.String(), "oops", "Should not return the oops page")
},
},
{
desc: "not in the range",
errorPage: &types.ErrorPage{Backend: "error", Query: "/test", Status: []string{"500-501", "503-599"}},
backendCode: http.StatusBadGateway,
backendErrorHandler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, "My error page.")
}),
validate: func(t *testing.T, recorder *httptest.ResponseRecorder) {
assert.Equal(t, http.StatusBadGateway, recorder.Code, "HTTP status")
assert.Contains(t, recorder.Body.String(), http.StatusText(http.StatusBadGateway))
assert.NotContains(t, recorder.Body.String(), "Test Server", "Should return the oops page since we have not configured the 502 code")
},
},
{
desc: "query replacement",
errorPage: &types.ErrorPage{Backend: "error", Query: "/{status}", Status: []string{"503-503"}},
backendCode: http.StatusServiceUnavailable,
backendErrorHandler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.URL.RequestURI() == "/"+strconv.Itoa(503) {
fmt.Fprintln(w, "My 503 page.")
} else {
fmt.Fprintln(w, "Failed")
}
}),
validate: func(t *testing.T, recorder *httptest.ResponseRecorder) {
assert.Equal(t, http.StatusServiceUnavailable, recorder.Code, "HTTP status")
assert.Contains(t, recorder.Body.String(), "My 503 page.")
assert.NotContains(t, recorder.Body.String(), "oops", "Should not return the oops page")
},
},
{
desc: "Single code",
errorPage: &types.ErrorPage{Backend: "error", Query: "/{status}", Status: []string{"503"}},
backendCode: http.StatusServiceUnavailable,
backendErrorHandler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.URL.RequestURI() == "/"+strconv.Itoa(503) {
fmt.Fprintln(w, "My 503 page.")
} else {
fmt.Fprintln(w, "Failed")
}
}),
validate: func(t *testing.T, recorder *httptest.ResponseRecorder) {
assert.Equal(t, http.StatusServiceUnavailable, recorder.Code, "HTTP status")
assert.Contains(t, recorder.Body.String(), "My 503 page.")
assert.NotContains(t, recorder.Body.String(), "oops", "Should not return the oops page")
},
},
}
for _, test := range testCases {
test := test
t.Run(test.desc, func(t *testing.T) {
t.Parallel()
errorPageHandler, err := NewHandler(test.errorPage, "test")
require.NoError(t, err)
errorPageHandler.backendHandler = test.backendErrorHandler
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(test.backendCode)
fmt.Fprintln(w, http.StatusText(test.backendCode))
})
req := testhelpers.MustNewRequest(http.MethodGet, "http://localhost/test", nil)
n := negroni.New()
n.Use(errorPageHandler)
n.UseHandler(handler)
recorder := httptest.NewRecorder()
n.ServeHTTP(recorder, req)
test.validate(t, recorder)
})
}
}
func TestHandlerOldWay(t *testing.T) {
testCases := []struct {
desc string
errorPage *types.ErrorPage
backendCode int
errorPageForwarder http.HandlerFunc
validate func(t *testing.T, recorder *httptest.ResponseRecorder)
}{
{
desc: "no error",
errorPage: &types.ErrorPage{Backend: "error", Query: "/test", Status: []string{"500-501", "503-599"}},
backendCode: http.StatusOK,
errorPageForwarder: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, "My error page.")
}),
validate: func(t *testing.T, recorder *httptest.ResponseRecorder) {
assert.Equal(t, http.StatusOK, recorder.Code, "HTTP status")
assert.Contains(t, recorder.Body.String(), "OK")
},
},
{
desc: "in the range",
errorPage: &types.ErrorPage{Backend: "error", Query: "/test", Status: []string{"500-501", "503-599"}},
backendCode: http.StatusInternalServerError,
errorPageForwarder: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, "My error page.")
}),
validate: func(t *testing.T, recorder *httptest.ResponseRecorder) {
assert.Equal(t, http.StatusInternalServerError, recorder.Code)
assert.Contains(t, recorder.Body.String(), "My error page.")
assert.NotContains(t, recorder.Body.String(), http.StatusText(http.StatusInternalServerError), "Should not return the oops page")
},
},
{
desc: "not in the range",
errorPage: &types.ErrorPage{Backend: "error", Query: "/test", Status: []string{"500-501", "503-599"}},
backendCode: http.StatusBadGateway,
errorPageForwarder: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, "My error page.")
}),
validate: func(t *testing.T, recorder *httptest.ResponseRecorder) {
assert.Equal(t, http.StatusBadGateway, recorder.Code)
assert.Contains(t, recorder.Body.String(), http.StatusText(http.StatusBadGateway))
assert.NotContains(t, recorder.Body.String(), "My error page.", "Should return the oops page since we have not configured the 502 code")
},
},
{
desc: "query replacement",
errorPage: &types.ErrorPage{Backend: "error", Query: "/{status}", Status: []string{"503-503"}},
backendCode: http.StatusServiceUnavailable,
errorPageForwarder: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.URL.RequestURI() == "/"+strconv.Itoa(503) {
fmt.Fprintln(w, "My 503 page.")
} else {
fmt.Fprintln(w, "Failed")
}
}),
validate: func(t *testing.T, recorder *httptest.ResponseRecorder) {
assert.Equal(t, http.StatusServiceUnavailable, recorder.Code, "HTTP status")
assert.Contains(t, recorder.Body.String(), "My 503 page.")
assert.NotContains(t, recorder.Body.String(), "oops", "Should not return the oops page")
},
},
{
desc: "Single code",
errorPage: &types.ErrorPage{Backend: "error", Query: "/{status}", Status: []string{"503"}},
backendCode: http.StatusServiceUnavailable,
errorPageForwarder: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.URL.RequestURI() == "/"+strconv.Itoa(503) {
fmt.Fprintln(w, "My 503 page.")
} else {
fmt.Fprintln(w, "Failed")
}
}),
validate: func(t *testing.T, recorder *httptest.ResponseRecorder) {
assert.Equal(t, http.StatusServiceUnavailable, recorder.Code, "HTTP status")
assert.Contains(t, recorder.Body.String(), "My 503 page.")
assert.NotContains(t, recorder.Body.String(), "oops", "Should not return the oops page")
},
},
}
req := testhelpers.MustNewRequest(http.MethodGet, "http://localhost/test", nil)
for _, test := range testCases {
test := test
t.Run(test.desc, func(t *testing.T) {
t.Parallel()
errorPageHandler, err := NewHandler(test.errorPage, "test")
require.NoError(t, err)
errorPageHandler.FallbackURL = "http://localhost"
errorPageHandler.PostLoad(test.errorPageForwarder)
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(test.backendCode)
fmt.Fprintln(w, http.StatusText(test.backendCode))
})
n := negroni.New()
n.Use(errorPageHandler)
n.UseHandler(handler)
recorder := httptest.NewRecorder()
n.ServeHTTP(recorder, req)
test.validate(t, recorder)
})
}
}
func TestHandlerOldWayIntegration(t *testing.T) {
errorPagesServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.URL.RequestURI() == "/"+strconv.Itoa(503) {
fmt.Fprintln(w, "My 503 page.")
} else {
fmt.Fprintln(w, "Test Server")
}
}))
defer errorPagesServer.Close()
testCases := []struct {
desc string
errorPage *types.ErrorPage
backendCode int
validate func(t *testing.T, recorder *httptest.ResponseRecorder)
}{
{
desc: "no error",
errorPage: &types.ErrorPage{Backend: "error", Query: "/test", Status: []string{"500-501", "503-599"}},
backendCode: http.StatusOK,
validate: func(t *testing.T, recorder *httptest.ResponseRecorder) {
assert.Equal(t, http.StatusOK, recorder.Code, "HTTP status")
assert.Contains(t, recorder.Body.String(), "OK")
},
},
{
desc: "in the range",
errorPage: &types.ErrorPage{Backend: "error", Query: "/test", Status: []string{"500-501", "503-599"}},
backendCode: http.StatusInternalServerError,
validate: func(t *testing.T, recorder *httptest.ResponseRecorder) {
assert.Equal(t, http.StatusInternalServerError, recorder.Code)
assert.Contains(t, recorder.Body.String(), "Test Server")
assert.NotContains(t, recorder.Body.String(), http.StatusText(http.StatusInternalServerError), "Should not return the oops page")
},
},
{
desc: "not in the range",
errorPage: &types.ErrorPage{Backend: "error", Query: "/test", Status: []string{"500-501", "503-599"}},
backendCode: http.StatusBadGateway,
validate: func(t *testing.T, recorder *httptest.ResponseRecorder) {
assert.Equal(t, http.StatusBadGateway, recorder.Code)
assert.Contains(t, recorder.Body.String(), http.StatusText(http.StatusBadGateway))
assert.NotContains(t, recorder.Body.String(), "Test Server", "Should return the oops page since we have not configured the 502 code")
},
},
{
desc: "query replacement",
errorPage: &types.ErrorPage{Backend: "error", Query: "/{status}", Status: []string{"503-503"}},
backendCode: http.StatusServiceUnavailable,
validate: func(t *testing.T, recorder *httptest.ResponseRecorder) {
assert.Equal(t, http.StatusServiceUnavailable, recorder.Code, "HTTP status")
assert.Contains(t, recorder.Body.String(), "My 503 page.")
assert.NotContains(t, recorder.Body.String(), "oops", "Should not return the oops page")
},
},
{
desc: "Single code",
errorPage: &types.ErrorPage{Backend: "error", Query: "/{status}", Status: []string{"503"}},
backendCode: http.StatusServiceUnavailable,
validate: func(t *testing.T, recorder *httptest.ResponseRecorder) {
assert.Equal(t, http.StatusServiceUnavailable, recorder.Code, "HTTP status")
assert.Contains(t, recorder.Body.String(), "My 503 page.")
assert.NotContains(t, recorder.Body.String(), "oops", "Should not return the oops page")
},
},
}
req := testhelpers.MustNewRequest(http.MethodGet, errorPagesServer.URL+"/test", nil)
for _, test := range testCases {
test := test
t.Run(test.desc, func(t *testing.T) {
errorPageHandler, err := NewHandler(test.errorPage, "test")
require.NoError(t, err)
errorPageHandler.FallbackURL = errorPagesServer.URL
err = errorPageHandler.PostLoad(nil)
require.NoError(t, err)
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(test.backendCode)
fmt.Fprintln(w, http.StatusText(test.backendCode))
})
n := negroni.New()
n.Use(errorPageHandler)
n.UseHandler(handler)
recorder := httptest.NewRecorder()
n.ServeHTTP(recorder, req)
test.validate(t, recorder)
})
}
}
func TestNewResponseRecorder(t *testing.T) {
testCases := []struct {
desc string
rw http.ResponseWriter
expected http.ResponseWriter
}{
{
desc: "Without Close Notify",
rw: httptest.NewRecorder(),
expected: &responseRecorderWithoutCloseNotify{},
},
{
desc: "With Close Notify",
rw: &mockRWCloseNotify{},
expected: &responseRecorderWithCloseNotify{},
},
}
for _, test := range testCases {
test := test
t.Run(test.desc, func(t *testing.T) {
t.Parallel()
rec := newResponseRecorder(test.rw)
assert.IsType(t, rec, test.expected)
})
}
}
type mockRWCloseNotify struct{}
func (m *mockRWCloseNotify) CloseNotify() <-chan bool {
panic("implement me")
}
func (m *mockRWCloseNotify) Header() http.Header {
panic("implement me")
}
func (m *mockRWCloseNotify) Write([]byte) (int, error) {
panic("implement me")
}
func (m *mockRWCloseNotify) WriteHeader(int) {
panic("implement me")
}

View file

@ -84,7 +84,7 @@ pages:
- 'Backend: Mesos': 'configuration/backends/mesos.md'
- 'Backend: Rancher': 'configuration/backends/rancher.md'
- 'Backend: Rest': 'configuration/backends/rest.md'
- 'Backend: Service Fabric': 'configuration/backends/servicefabric.md'
- 'Backend: Azure Service Fabric': 'configuration/backends/servicefabric.md'
- 'Backend: Zookeeper': 'configuration/backends/zookeeper.md'
- 'API / Dashboard': 'configuration/api.md'
- 'Ping': 'configuration/ping.md'

View file

@ -16,7 +16,7 @@ var _ Store = (*LocalStore)(nil)
type LocalStore struct {
filename string
storedData *StoredData
SaveDataChan chan *StoredData
SaveDataChan chan *StoredData `json:"-"`
}
// NewLocalStore initializes a new LocalStore with a file name
@ -30,31 +30,38 @@ func (s *LocalStore) get() (*StoredData, error) {
if s.storedData == nil {
s.storedData = &StoredData{HTTPChallenges: make(map[string]map[string][]byte)}
f, err := os.Open(s.filename)
if err != nil {
return nil, err
}
defer f.Close()
file, err := ioutil.ReadAll(f)
hasData, err := CheckFile(s.filename)
if err != nil {
return nil, err
}
if len(file) > 0 {
if err := json.Unmarshal(file, s.storedData); err != nil {
return nil, err
}
}
// Check if ACME Account is in ACME V1 format
if s.storedData.Account != nil && s.storedData.Account.Registration != nil {
isOldRegistration, err := regexp.MatchString(RegistrationURLPathV1Regexp, s.storedData.Account.Registration.URI)
if hasData {
f, err := os.Open(s.filename)
if err != nil {
return nil, err
}
if isOldRegistration {
s.storedData.Account = nil
s.SaveDataChan <- s.storedData
defer f.Close()
file, err := ioutil.ReadAll(f)
if err != nil {
return nil, err
}
if len(file) > 0 {
if err := json.Unmarshal(file, s.storedData); err != nil {
return nil, err
}
}
// Check if ACME Account is in ACME V1 format
if s.storedData.Account != nil && s.storedData.Account.Registration != nil {
isOldRegistration, err := regexp.MatchString(RegistrationURLPathV1Regexp, s.storedData.Account.Registration.URI)
if err != nil {
return nil, err
}
if isOldRegistration {
s.storedData.Account = nil
s.SaveDataChan <- s.storedData
}
}
}
}
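The json:"-" tag added to SaveDataChan above keeps the channel out of any JSON serialization of LocalStore, which matters because encoding/json cannot marshal channels. A minimal demonstration:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    type withChan struct {
        Name string
        C    chan int
    }

    type withoutChan struct {
        Name string
        C    chan int `json:"-"`
    }

    func main() {
        _, err := json.Marshal(withChan{Name: "store", C: make(chan int)})
        fmt.Println(err) // json: unsupported type: chan int

        b, err := json.Marshal(withoutChan{Name: "store", C: make(chan int)})
        fmt.Println(string(b), err) // {"Name":"store"} <nil>
    }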

View file

@ -7,10 +7,17 @@ import (
"os"
)
// Check file permissions and content size
func checkFile(name string) (bool, error) {
// CheckFile checks file permissions and content size
func CheckFile(name string) (bool, error) {
f, err := os.Open(name)
if err != nil {
if os.IsNotExist(err) {
f, err = os.Create(name)
if err != nil {
return false, err
}
return false, f.Chmod(0600)
}
return false, err
}
defer f.Close()

View file

@ -2,11 +2,18 @@ package acme
import "os"
// Check file content size
// CheckFile checks file content size
// Do not check file permissions on Windows right now
func checkFile(name string) (bool, error) {
func CheckFile(name string) (bool, error) {
f, err := os.Open(name)
if err != nil {
if os.IsNotExist(err) {
f, err = os.Create(name)
if err != nil {
return false, err
}
return false, f.Chmod(0600)
}
return false, err
}
defer f.Close()
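checkFile is exported as CheckFile so the store above can call it before reading: when the storage file is missing it is created with 0600 permissions, and the returned boolean reports whether it already holds data. A usage sketch (the import path is an assumption about where the acme package lives):

    package main

    import (
        "fmt"
        "log"

        "github.com/containous/traefik/provider/acme"
    )

    func main() {
        hasData, err := acme.CheckFile("/tmp/acme.json")
        if err != nil {
            log.Fatal(err)
        }
        if !hasData {
            fmt.Println("acme.json was created empty; nothing to load yet")
            return
        }
        fmt.Println("acme.json already contains stored data")
    }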

View file

@ -23,7 +23,7 @@ import (
traefikTLS "github.com/containous/traefik/tls"
"github.com/containous/traefik/types"
"github.com/pkg/errors"
"github.com/xenolf/lego/acmev2"
acme "github.com/xenolf/lego/acmev2"
"github.com/xenolf/lego/providers/dns"
)
@ -116,7 +116,7 @@ func (p *Provider) init() error {
p.certificates, err = p.Store.GetCertificates()
if err != nil {
return fmt.Errorf("unable to get ACME account : %v", err)
return fmt.Errorf("unable to get ACME certificates : %v", err)
}
p.watchCertificate()
@ -424,7 +424,7 @@ func (p *Provider) refreshCertificates() {
for _, cert := range p.certificates {
certificate := &traefikTLS.Certificate{CertFile: traefikTLS.FileOrContent(cert.Certificate), KeyFile: traefikTLS.FileOrContent(cert.Key)}
config.Configuration.TLS = append(config.Configuration.TLS, &traefikTLS.Configuration{Certificate: certificate})
config.Configuration.TLS = append(config.Configuration.TLS, &traefikTLS.Configuration{Certificate: certificate, EntryPoints: []string{p.EntryPoint}})
}
p.configurationChan <- config
}
@ -565,16 +565,16 @@ func (p *Provider) getValidDomains(domain types.Domain, wildcardAllowed bool) ([
if p.DNSChallenge == nil {
return nil, fmt.Errorf("unable to generate a wildcard certificate in ACME provider for domain %q : ACME needs a DNSChallenge", strings.Join(domains, ","))
}
if len(domain.SANs) > 0 {
return nil, fmt.Errorf("unable to generate a wildcard certificate in ACME provider for domain %q : SANs are not allowed", strings.Join(domains, ","))
}
} else {
for _, san := range domain.SANs {
if strings.HasPrefix(san, "*") {
return nil, fmt.Errorf("unable to generate a certificate in ACME provider for domains %q: SANs can not be a wildcard domain", strings.Join(domains, ","))
}
if strings.HasPrefix(domain.Main, "*.*") {
return nil, fmt.Errorf("unable to generate a wildcard certificate in ACME provider for domain %q : ACME does not allow '*.*' wildcard domain", strings.Join(domains, ","))
}
}
for _, san := range domain.SANs {
if strings.HasPrefix(san, "*") {
return nil, fmt.Errorf("unable to generate a certificate in ACME provider for domains %q: SAN %q can not be a wildcard domain", strings.Join(domains, ","), san)
}
}
domains = fun.Map(types.CanonicalDomain, domains).([]string)
return domains, nil
}
@ -610,26 +610,31 @@ func (p *Provider) deleteUnnecessaryDomains() {
keepDomain = false
}
break
} else if strings.HasPrefix(domain.Main, "*") && domain.SANs == nil {
// Check if domains can be validated by the wildcard domain
var newDomainsToCheck []string
for _, domainProcessed := range domainToCheck.ToStrArray() {
if isDomainAlreadyChecked(domainProcessed, domain.ToStrArray()) {
log.Warnf("Domain %q will not be processed by ACME provider because it is validated by the wildcard %q", domainProcessed, domain.Main)
continue
}
newDomainsToCheck = append(newDomainsToCheck, domainProcessed)
}
// Delete the domain if both Main and SANs can be validated by the wildcard domain
// otherwise keep the unchecked values
if newDomainsToCheck == nil {
keepDomain = false
break
}
domainToCheck.Set(newDomainsToCheck)
}
// Check if CN or SANS to check already exists
// or can not be checked by a wildcard
var newDomainsToCheck []string
for _, domainProcessed := range domainToCheck.ToStrArray() {
if idxDomain < idxDomainToCheck && isDomainAlreadyChecked(domainProcessed, domain.ToStrArray()) {
// The domain is duplicated in a CN
log.Warnf("Domain %q is duplicated in the configuration or validated by the domain %v. It will be processed once.", domainProcessed, domain)
continue
} else if domain.Main != domainProcessed && strings.HasPrefix(domain.Main, "*") && isDomainAlreadyChecked(domainProcessed, []string{domain.Main}) {
// Check if a wildcard can validate the domain
log.Warnf("Domain %q will not be processed by ACME provider because it is validated by the wildcard %q", domainProcessed, domain.Main)
continue
}
newDomainsToCheck = append(newDomainsToCheck, domainProcessed)
}
// Delete the domain if both Main and SANs can be validated by the wildcard domain
// otherwise keep the unchecked values
if newDomainsToCheck == nil {
keepDomain = false
break
}
domainToCheck.Set(newDomainsToCheck)
}
if keepDomain {
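An illustrative sketch (helper name and matching rule are assumptions drawn from the surrounding logic, not the exact Traefik implementation) of the wildcard check used when deduplicating domains: "*.acme.wtf" covers exactly one extra label, so it validates "foo.acme.wtf" but not "acme.wtf" itself, nor "a.b.acme.wtf".

    package main

    import (
        "fmt"
        "strings"
    )

    func coveredByWildcard(domain, wildcard string) bool {
        if !strings.HasPrefix(wildcard, "*.") {
            return false
        }
        suffix := wildcard[1:] // ".acme.wtf"
        if !strings.HasSuffix(domain, suffix) {
            return false
        }
        label := strings.TrimSuffix(domain, suffix)
        return label != "" && !strings.Contains(label, ".")
    }

    func main() {
        fmt.Println(coveredByWildcard("foo.acme.wtf", "*.acme.wtf")) // true
        fmt.Println(coveredByWildcard("acme.wtf", "*.acme.wtf"))     // false
        fmt.Println(coveredByWildcard("a.b.acme.wtf", "*.acme.wtf")) // false
    }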

View file

@ -207,11 +207,27 @@ func TestGetValidDomain(t *testing.T) {
expectedDomains: nil,
},
{
desc: "unexpected SANs",
domains: types.Domain{Main: "*.traefik.wtf", SANs: []string{"foo.traefik.wtf"}},
desc: "unauthorized wildcard with SAN",
domains: types.Domain{Main: "*.*.traefik.wtf", SANs: []string{"foo.traefik.wtf"}},
dnsChallenge: &DNSChallenge{},
wildcardAllowed: true,
expectedErr: "unable to generate a wildcard certificate in ACME provider for domain \"*.traefik.wtf,foo.traefik.wtf\" : SANs are not allowed",
expectedErr: "unable to generate a wildcard certificate in ACME provider for domain \"*.*.traefik.wtf,foo.traefik.wtf\" : ACME does not allow '*.*' wildcard domain",
expectedDomains: nil,
},
{
desc: "wildcard and SANs",
domains: types.Domain{Main: "*.traefik.wtf", SANs: []string{"traefik.wtf"}},
dnsChallenge: &DNSChallenge{},
wildcardAllowed: true,
expectedErr: "",
expectedDomains: []string{"*.traefik.wtf", "traefik.wtf"},
},
{
desc: "unexpected SANs",
domains: types.Domain{Main: "*.traefik.wtf", SANs: []string{"*.acme.wtf"}},
dnsChallenge: &DNSChallenge{},
wildcardAllowed: true,
expectedErr: "unable to generate a certificate in ACME provider for domains \"*.traefik.wtf,*.acme.wtf\": SAN \"*.acme.wtf\" can not be a wildcard domain",
expectedDomains: nil,
},
}
@ -251,8 +267,8 @@ func TestDeleteUnnecessaryDomains(t *testing.T) {
Main: "*.foo.acme.wtf",
},
{
Main: "acme.wtf",
SANs: []string{"traefik.acme.wtf", "bar.foo"},
Main: "acme02.wtf",
SANs: []string{"traefik.acme02.wtf", "bar.foo"},
},
},
expectedDomains: []types.Domain{
@ -262,15 +278,38 @@ func TestDeleteUnnecessaryDomains(t *testing.T) {
},
{
Main: "*.foo.acme.wtf",
SANs: []string{},
},
{
Main: "acme.wtf",
SANs: []string{"traefik.acme.wtf", "bar.foo"},
Main: "acme02.wtf",
SANs: []string{"traefik.acme02.wtf", "bar.foo"},
},
},
},
{
desc: "2 domains with same values",
desc: "wildcard and root domain",
domains: []types.Domain{
{
Main: "acme.wtf",
},
{
Main: "*.acme.wtf",
SANs: []string{"acme.wtf"},
},
},
expectedDomains: []types.Domain{
{
Main: "acme.wtf",
SANs: []string{},
},
{
Main: "*.acme.wtf",
SANs: []string{},
},
},
},
{
desc: "2 equals domains",
domains: []types.Domain{
{
Main: "acme.wtf",
@ -288,6 +327,29 @@ func TestDeleteUnnecessaryDomains(t *testing.T) {
},
},
},
{
desc: "2 domains with same values",
domains: []types.Domain{
{
Main: "acme.wtf",
SANs: []string{"traefik.acme.wtf"},
},
{
Main: "acme.wtf",
SANs: []string{"traefik.acme.wtf", "foo.bar"},
},
},
expectedDomains: []types.Domain{
{
Main: "acme.wtf",
SANs: []string{"traefik.acme.wtf"},
},
{
Main: "foo.bar",
SANs: []string{},
},
},
},
{
desc: "domain totally checked by wildcard",
domains: []types.Domain{
@ -302,6 +364,25 @@ func TestDeleteUnnecessaryDomains(t *testing.T) {
expectedDomains: []types.Domain{
{
Main: "*.acme.wtf",
SANs: []string{},
},
},
},
{
desc: "duplicated wildcard",
domains: []types.Domain{
{
Main: "*.acme.wtf",
SANs: []string{"acme.wtf"},
},
{
Main: "*.acme.wtf",
},
},
expectedDomains: []types.Domain{
{
Main: "*.acme.wtf",
SANs: []string{"acme.wtf"},
},
},
},
@ -315,6 +396,10 @@ func TestDeleteUnnecessaryDomains(t *testing.T) {
{
Main: "*.acme.wtf",
},
{
Main: "who.acme.wtf",
SANs: []string{"traefik.acme.wtf", "bar.acme.wtf"},
},
},
expectedDomains: []types.Domain{
{
@ -323,6 +408,7 @@ func TestDeleteUnnecessaryDomains(t *testing.T) {
},
{
Main: "*.acme.wtf",
SANs: []string{},
},
},
},

View file

@ -39,8 +39,8 @@ func (p *Provider) buildConfigurationV2(catalog []catalogUpdate) *types.Configur
"getFrontendRule": p.getFrontendRule,
"getBasicAuth": label.GetFuncSliceString(label.TraefikFrontendAuthBasic),
"getFrontEndEntryPoints": label.GetFuncSliceString(label.TraefikFrontendEntryPoints),
"getPriority": label.GetFuncInt(label.TraefikFrontendPriority, label.DefaultFrontendPriorityInt),
"getPassHostHeader": label.GetFuncBool(label.TraefikFrontendPassHostHeader, label.DefaultPassHostHeaderBool),
"getPriority": label.GetFuncInt(label.TraefikFrontendPriority, label.DefaultFrontendPriority),
"getPassHostHeader": label.GetFuncBool(label.TraefikFrontendPassHostHeader, label.DefaultPassHostHeader),
"getPassTLSCert": label.GetFuncBool(label.TraefikFrontendPassTLSCert, label.DefaultPassTLSCert),
"getWhiteList": label.GetWhiteList,
"getRedirect": label.GetRedirect,
@ -192,7 +192,7 @@ func getServerName(node *api.ServiceEntry, index int) string {
}
func (p *Provider) getWeight(tags []string) int {
weight := p.getIntAttribute(label.SuffixWeight, tags, label.DefaultWeightInt)
weight := p.getIntAttribute(label.SuffixWeight, tags, label.DefaultWeight)
// Deprecated
deprecatedWeightTag := "backend." + label.SuffixWeight
@ -200,7 +200,7 @@ func (p *Provider) getWeight(tags []string) int {
log.Warnf("Deprecated configuration found: %s. Please use %s.",
p.getPrefixedName(deprecatedWeightTag), p.getPrefixedName(label.SuffixWeight))
weight = p.getIntAttribute(deprecatedWeightTag, tags, label.DefaultWeightInt)
weight = p.getIntAttribute(deprecatedWeightTag, tags, label.DefaultWeight)
}
return weight

View file

@ -36,8 +36,8 @@ func (p *Provider) buildConfigurationV1(catalog []catalogUpdate) *types.Configur
"getFrontendRule": p.getFrontendRuleV1,
"getBasicAuth": p.getFuncSliceAttribute(label.SuffixFrontendAuthBasic),
"getEntryPoints": getEntryPointsV1,
"getPriority": p.getFuncIntAttribute(label.SuffixFrontendPriority, label.DefaultFrontendPriorityInt),
"getPassHostHeader": p.getFuncBoolAttribute(label.SuffixFrontendPassHostHeader, label.DefaultPassHostHeaderBool),
"getPriority": p.getFuncIntAttribute(label.SuffixFrontendPriority, label.DefaultFrontendPriority),
"getPassHostHeader": p.getFuncBoolAttribute(label.SuffixFrontendPassHostHeader, label.DefaultPassHostHeader),
"getPassTLSCert": p.getFuncBoolAttribute(label.SuffixFrontendPassTLSCert, label.DefaultPassTLSCert),
}

View file

@ -40,8 +40,8 @@ func (p *Provider) buildConfigurationV2(containersInspected []dockerData) *types
// Frontend functions
"getBackendName": getBackendName,
"getPriority": label.GetFuncInt(label.TraefikFrontendPriority, label.DefaultFrontendPriorityInt),
"getPassHostHeader": label.GetFuncBool(label.TraefikFrontendPassHostHeader, label.DefaultPassHostHeaderBool),
"getPriority": label.GetFuncInt(label.TraefikFrontendPriority, label.DefaultFrontendPriority),
"getPassHostHeader": label.GetFuncBool(label.TraefikFrontendPassHostHeader, label.DefaultPassHostHeader),
"getPassTLSCert": label.GetFuncBool(label.TraefikFrontendPassTLSCert, label.DefaultPassTLSCert),
"getEntryPoints": label.GetFuncSliceString(label.TraefikFrontendEntryPoints),
"getBasicAuth": label.GetFuncSliceString(label.TraefikFrontendAuthBasic),
@ -318,7 +318,7 @@ func (p *Provider) getServers(containers []dockerData) map[string]types.Server {
servers[provider.Normalize(serverName)] = types.Server{
URL: fmt.Sprintf("%s://%s:%s", protocol, ip, port),
Weight: label.GetIntValue(container.SegmentLabels, label.TraefikWeight, label.DefaultWeightInt),
Weight: label.GetIntValue(container.SegmentLabels, label.TraefikWeight, label.DefaultWeight),
}
}

View file

@ -57,7 +57,7 @@ func TestDockerBuildConfiguration(t *testing.T) {
Servers: map[string]types.Server{
"server-test": {
URL: "http://127.0.0.1:80",
Weight: 0,
Weight: label.DefaultWeight,
},
},
CircuitBreaker: nil,
@ -236,12 +236,12 @@ func TestDockerBuildConfiguration(t *testing.T) {
"foo": {
Status: []string{"404"},
Query: "foo_query",
Backend: "foobar",
Backend: "backend-foobar",
},
"bar": {
Status: []string{"500", "600"},
Query: "bar_query",
Backend: "foobar",
Backend: "backend-foobar",
},
},
RateLimit: &types.RateLimit{

View file

@ -59,7 +59,7 @@ func TestSwarmBuildConfiguration(t *testing.T) {
Servers: map[string]types.Server{
"server-test": {
URL: "http://127.0.0.1:80",
Weight: 0,
Weight: label.DefaultWeight,
},
},
},
@ -243,12 +243,12 @@ func TestSwarmBuildConfiguration(t *testing.T) {
"foo": {
Status: []string{"404"},
Query: "foo_query",
Backend: "foobar",
Backend: "backend-foobar",
},
"bar": {
Status: []string{"500", "600"},
Query: "bar_query",
Backend: "foobar",
Backend: "backend-foobar",
},
},
RateLimit: &types.RateLimit{

Some files were not shown because too many files have changed in this diff.