Merge tag 'v1.4.0-rc3' into master

Fernandez Ludovic 2017-09-18 21:52:44 +02:00
commit 2cbf9cae71
93 changed files with 2772 additions and 1141 deletions


@@ -1,5 +1,30 @@
# Change Log # Change Log
## [v1.4.0-rc3](https://github.com/containous/traefik/tree/v1.4.0-rc3) (2017-09-18)
[All Commits](https://github.com/containous/traefik/compare/v1.4.0-rc2...v1.4.0-rc3)
**Enhancements:**
- **[acme]** Display Traefik logs in integration tests ([#2114](https://github.com/containous/traefik/pull/2114) by [ldez](https://github.com/ldez))
- **[authentication]** Manage Headers for the Authentication forwarding. ([#2132](https://github.com/containous/traefik/pull/2132) by [ldez](https://github.com/ldez))
- Fail fast in IT and fix some flaky tests ([#2126](https://github.com/containous/traefik/pull/2126) by [ldez](https://github.com/ldez))
**Bug fixes:**
- **[consul]** Fix Consul Catalog refresh ([#2089](https://github.com/containous/traefik/pull/2089) by [Juliens](https://github.com/Juliens))
- **[healthcheck]** Fix healthcheck port ([#2131](https://github.com/containous/traefik/pull/2131) by [fredix](https://github.com/fredix))
- **[logs,middleware]** Enable loss less rotation of log files ([#2062](https://github.com/containous/traefik/pull/2062) by [marco-jantke](https://github.com/marco-jantke))
- **[rancher]** Add stack name to backend name generation to fix rancher metadata backend ([#2107](https://github.com/containous/traefik/pull/2107) by [SantoDE](https://github.com/SantoDE))
- **[rancher]** Rancher host IP address ([#2101](https://github.com/containous/traefik/pull/2101) by [matq007](https://github.com/matq007))
- Fixes entry points configuration. ([#2120](https://github.com/containous/traefik/pull/2120) by [ldez](https://github.com/ldez))
**Documentation:**
- **[acme,provider]** Enhance documentation readability. ([#2095](https://github.com/containous/traefik/pull/2095) by [ldez](https://github.com/ldez))
- **[api]** Add examples of proxying ping ([#2102](https://github.com/containous/traefik/pull/2102) by [deitch](https://github.com/deitch))
- **[k8s]** Add guide section on production advice, esp. CPU. ([#2113](https://github.com/containous/traefik/pull/2113) by [timoreimann](https://github.com/timoreimann))
- **[metrics]** Enhance web backend documentation ([#2122](https://github.com/containous/traefik/pull/2122) by [ldez](https://github.com/ldez))
- Add forward auth documentation. ([#2110](https://github.com/containous/traefik/pull/2110) by [ldez](https://github.com/ldez))
- User guide gRPC ([#2108](https://github.com/containous/traefik/pull/2108) by [Juliens](https://github.com/Juliens))
- Document custom error page restrictions. ([#2104](https://github.com/containous/traefik/pull/2104) by [timoreimann](https://github.com/timoreimann))
## [v1.4.0-rc2](https://github.com/containous/traefik/tree/v1.4.0-rc2) (2017-09-08) ## [v1.4.0-rc2](https://github.com/containous/traefik/tree/v1.4.0-rc2) (2017-09-08)
[All Commits](https://github.com/containous/traefik/compare/v1.4.0-rc1...v1.4.0-rc2) [All Commits](https://github.com/containous/traefik/compare/v1.4.0-rc1...v1.4.0-rc2)


@@ -2,7 +2,7 @@
## Building ## Building
You need either [Docker](https://github.com/docker/docker) and `make` (Method 1), or `go` (Method 2) in order to build traefik. For changes to its dependencies, the `glide` dependency management tool and `glide-vc` plugin are required. You need either [Docker](https://github.com/docker/docker) and `make` (Method 1), or `go` (Method 2) in order to build Traefik. For changes to its dependencies, the `glide` dependency management tool and `glide-vc` plugin are required.
### Method 1: Using `Docker` and `Makefile` ### Method 1: Using `Docker` and `Makefile`


@@ -17,20 +17,20 @@ It supports several backends ([Docker](https://www.docker.com/), [Swarm mode](ht
---

| **[Overview](#overview)** |
**[Features](#features)** |
**[Supported backends](#supported-backends)** |
**[Quickstart](#quickstart)** |
**[Web UI](#web-ui)** |
**[Test it](#test-it)** |
**[Documentation](#documentation)** |

| **[Support](#support)** |
**[Release cycle](#release-cycle)** |
**[Contributing](#contributing)** |
**[Maintainers](#maintainers)** |
**[Plumbing](#plumbing)** |
**[Credits](#credits)** |

---
@@ -61,7 +61,7 @@ Run it and forget it!
- [It's fast](https://docs.traefik.io/benchmarks) - [It's fast](https://docs.traefik.io/benchmarks)
- No dependency hell, single binary made with go - No dependency hell, single binary made with go
- [Tiny](https://microbadger.com/images/traefik) [official](https://hub.docker.com/r/_/traefik/) official docker image - [Tiny](https://microbadger.com/images/traefik) [official](https://hub.docker.com/r/_/traefik/) docker image
- Rest API - Rest API
- Hot-reloading of configuration. No need to restart the process - Hot-reloading of configuration. No need to restart the process
- Circuit breakers, retry - Circuit breakers, retry


@@ -178,7 +178,7 @@ func (dc *DomainsCertificates) renewCertificates(acmeCert *Certificate, domain D
return nil return nil
} }
} }
return fmt.Errorf("Certificate to renew not found for domain %s", domain.Main) return fmt.Errorf("certificate to renew not found for domain %s", domain.Main)
} }
func (dc *DomainsCertificates) addCertificateForDomains(acmeCert *Certificate, domain Domain) (*DomainsCertificate, error) { func (dc *DomainsCertificates) addCertificateForDomains(acmeCert *Certificate, domain Domain) (*DomainsCertificate, error) {


@@ -43,7 +43,7 @@ func (c *challengeProvider) getCertificate(domain string) (cert *tls.Certificate
} }
} }
} }
return fmt.Errorf("Cannot find challenge cert for domain %s", domain) return fmt.Errorf("cannot find challenge cert for domain %s", domain)
} }
notify := func(err error, time time.Duration) { notify := func(err error, time time.Duration) {
log.Errorf("Error getting cert: %v, retrying in %s", err, time) log.Errorf("Error getting cert: %v, retrying in %s", err, time)


@@ -1,60 +0,0 @@
package auth
import (
"io/ioutil"
"net/http"
"github.com/containous/traefik/log"
"github.com/containous/traefik/types"
)
// Forward the authentication to a external server
func Forward(forward *types.Forward, w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {
httpClient := http.Client{}
if forward.TLS != nil {
tlsConfig, err := forward.TLS.CreateTLSConfig()
if err != nil {
log.Debugf("Impossible to configure TLS to call %s. Cause %s", forward.Address, err)
w.WriteHeader(http.StatusInternalServerError)
return
}
httpClient.Transport = &http.Transport{
TLSClientConfig: tlsConfig,
}
}
forwardReq, err := http.NewRequest(http.MethodGet, forward.Address, nil)
if err != nil {
log.Debugf("Error calling %s. Cause %s", forward.Address, err)
w.WriteHeader(http.StatusInternalServerError)
return
}
forwardReq.Header = r.Header
forwardResponse, forwardErr := httpClient.Do(forwardReq)
if forwardErr != nil {
log.Debugf("Error calling %s. Cause: %s", forward.Address, forwardErr)
w.WriteHeader(http.StatusInternalServerError)
return
}
body, readError := ioutil.ReadAll(forwardResponse.Body)
if readError != nil {
log.Debugf("Error reading body %s. Cause: %s", forward.Address, readError)
w.WriteHeader(http.StatusInternalServerError)
return
}
defer forwardResponse.Body.Close()
if forwardResponse.StatusCode < http.StatusOK || forwardResponse.StatusCode >= http.StatusMultipleChoices {
log.Debugf("Remote error %s. StatusCode: %d", forward.Address, forwardResponse.StatusCode)
w.WriteHeader(forwardResponse.StatusCode)
w.Write(body)
return
}
r.RequestURI = r.URL.RequestURI()
next(w, r)
}
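For orientation, this forward-auth middleware is driven by entrypoint configuration. A minimal sketch of what enabling it can look like in `traefik.toml` follows; the authentication server address and certificate paths are hypothetical, and the exact keys should be checked against the forward-auth documentation added in this release:

```toml
[entryPoints]
  [entryPoints.http]
    address = ":80"
    # Forward every incoming request to this server before routing it;
    # any non-2xx answer from the auth server is returned to the client as-is.
    [entryPoints.http.auth.forward]
      address = "https://authserver.example.com/auth"
      # Optional TLS settings used when calling the authentication server
      # (this is what forward.TLS.CreateTLSConfig() above consumes).
      [entryPoints.http.auth.forward.tls]
        cert = "/path/to/client.cert"
        key = "/path/to/client.key"
```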


@@ -12,6 +12,7 @@ import (
"github.com/containous/traefik/provider/dynamodb" "github.com/containous/traefik/provider/dynamodb"
"github.com/containous/traefik/provider/ecs" "github.com/containous/traefik/provider/ecs"
"github.com/containous/traefik/provider/etcd" "github.com/containous/traefik/provider/etcd"
"github.com/containous/traefik/provider/eureka"
"github.com/containous/traefik/provider/file" "github.com/containous/traefik/provider/file"
"github.com/containous/traefik/provider/kubernetes" "github.com/containous/traefik/provider/kubernetes"
"github.com/containous/traefik/provider/marathon" "github.com/containous/traefik/provider/marathon"
@@ -148,6 +149,10 @@ func NewTraefikDefaultPointersConfiguration() *TraefikConfiguration {
defaultDynamoDB.TableName = "traefik" defaultDynamoDB.TableName = "traefik"
defaultDynamoDB.Watch = true defaultDynamoDB.Watch = true
// default Eureka
var defaultEureka eureka.Provider
defaultEureka.Delay = "30s"
// default AccessLog // default AccessLog
defaultAccessLog := types.AccessLog{ defaultAccessLog := types.AccessLog{
Format: accesslog.CommonFormat, Format: accesslog.CommonFormat,
@@ -168,6 +173,7 @@ func NewTraefikDefaultPointersConfiguration() *TraefikConfiguration {
Mesos: &defaultMesos, Mesos: &defaultMesos,
ECS: &defaultECS, ECS: &defaultECS,
Rancher: &defaultRancher, Rancher: &defaultRancher,
Eureka: &defaultEureka,
DynamoDB: &defaultDynamoDB, DynamoDB: &defaultDynamoDB,
Retry: &configuration.Retry{}, Retry: &configuration.Retry{},
HealthCheck: &configuration.HealthCheckConfig{}, HealthCheck: &configuration.HealthCheckConfig{},
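The new Eureka defaults registered above correspond to a provider block in the static configuration. A rough sketch of what enabling it might look like; the endpoint and domain values are made up, and the key names follow the pattern of the other providers rather than being taken from this diff:

```toml
# Enable the Eureka configuration backend (sketch; values are illustrative).
[eureka]
# Eureka server endpoint.
endpoint = "http://my.eureka.server/eureka"
# Default domain used to build frontend rules.
domain = "eureka.localhost"
# Refresh delay, matching the "30s" default set in the Go code above.
delay = "30s"
```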


@@ -193,18 +193,11 @@ func (ep *EntryPoints) String() string {
// Set's argument is a string to be parsed to set the flag. // Set's argument is a string to be parsed to set the flag.
// It's a comma-separated list, so we split it. // It's a comma-separated list, so we split it.
func (ep *EntryPoints) Set(value string) error { func (ep *EntryPoints) Set(value string) error {
regex := regexp.MustCompile(`(?:Name:(?P<Name>\S*))\s*(?:Address:(?P<Address>\S*))?\s*(?:TLS:(?P<TLS>\S*))?\s*((?P<TLSACME>TLS))?\s*(?:CA:(?P<CA>\S*))?\s*(?:Redirect.EntryPoint:(?P<RedirectEntryPoint>\S*))?\s*(?:Redirect.Regex:(?P<RedirectRegex>\\S*))?\s*(?:Redirect.Replacement:(?P<RedirectReplacement>\S*))?\s*(?:Compress:(?P<Compress>\S*))?\s*(?:WhiteListSourceRange:(?P<WhiteListSourceRange>\S*))?\s*(?:ProxyProtocol:(?P<ProxyProtocol>\S*))?`) result, err := parseEntryPointsConfiguration(value)
match := regex.FindAllStringSubmatch(value, -1) if err != nil {
if match == nil { return err
return fmt.Errorf("bad EntryPoints format: %s", value)
}
matchResult := match[0]
result := make(map[string]string)
for i, name := range regex.SubexpNames() {
if i != 0 {
result[name] = matchResult[i]
}
} }
var configTLS *TLS var configTLS *TLS
if len(result["TLS"]) > 0 { if len(result["TLS"]) > 0 {
certs := Certificates{} certs := Certificates{}
@@ -232,24 +225,13 @@ func (ep *EntryPoints) Set(value string) error {
} }
} }
compress := false
if len(result["Compress"]) > 0 {
compress = strings.EqualFold(result["Compress"], "true") ||
strings.EqualFold(result["Compress"], "enable") ||
strings.EqualFold(result["Compress"], "on")
}
whiteListSourceRange := []string{} whiteListSourceRange := []string{}
if len(result["WhiteListSourceRange"]) > 0 { if len(result["WhiteListSourceRange"]) > 0 {
whiteListSourceRange = strings.Split(result["WhiteListSourceRange"], ",") whiteListSourceRange = strings.Split(result["WhiteListSourceRange"], ",")
} }
proxyprotocol := false compress := toBool(result, "Compress")
if len(result["ProxyProtocol"]) > 0 { proxyProtocol := toBool(result, "ProxyProtocol")
proxyprotocol = strings.EqualFold(result["ProxyProtocol"], "true") ||
strings.EqualFold(result["ProxyProtocol"], "enable") ||
strings.EqualFold(result["ProxyProtocol"], "on")
}
(*ep)[result["Name"]] = &EntryPoint{ (*ep)[result["Name"]] = &EntryPoint{
Address: result["Address"], Address: result["Address"],
@@ -257,12 +239,37 @@ func (ep *EntryPoints) Set(value string) error {
Redirect: redirect, Redirect: redirect,
Compress: compress, Compress: compress,
WhitelistSourceRange: whiteListSourceRange, WhitelistSourceRange: whiteListSourceRange,
ProxyProtocol: proxyprotocol, ProxyProtocol: proxyProtocol,
} }
return nil return nil
} }
func parseEntryPointsConfiguration(value string) (map[string]string, error) {
regex := regexp.MustCompile(`(?:Name:(?P<Name>\S*))\s*(?:Address:(?P<Address>\S*))?\s*(?:TLS:(?P<TLS>\S*))?\s*(?P<TLSACME>TLS)?\s*(?:CA:(?P<CA>\S*))?\s*(?:Redirect\.EntryPoint:(?P<RedirectEntryPoint>\S*))?\s*(?:Redirect\.Regex:(?P<RedirectRegex>\S*))?\s*(?:Redirect\.Replacement:(?P<RedirectReplacement>\S*))?\s*(?:Compress:(?P<Compress>\S*))?\s*(?:WhiteListSourceRange:(?P<WhiteListSourceRange>\S*))?\s*(?:ProxyProtocol:(?P<ProxyProtocol>\S*))?`)
match := regex.FindAllStringSubmatch(value, -1)
if match == nil {
return nil, fmt.Errorf("bad EntryPoints format: %s", value)
}
matchResult := match[0]
result := make(map[string]string)
for i, name := range regex.SubexpNames() {
if i != 0 && len(matchResult[i]) != 0 {
result[name] = matchResult[i]
}
}
return result, nil
}
func toBool(conf map[string]string, key string) bool {
if val, ok := conf[key]; ok {
return strings.EqualFold(val, "true") ||
strings.EqualFold(val, "enable") ||
strings.EqualFold(val, "on")
}
return false
}
// Get return the EntryPoints map // Get return the EntryPoints map
func (ep *EntryPoints) Get() interface{} { func (ep *EntryPoints) Get() interface{} {
return EntryPoints(*ep) return EntryPoints(*ep)
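As a reminder of what this flag parser consumes, an entrypoint is declared on the command line as a space-separated list of `Key:Value` tokens. A sketch, with arbitrary entrypoint names and addresses:

```bash
# Two hypothetical entrypoints: HTTPS with TLS, compression and PROXY protocol,
# plus HTTP redirecting to it. Values such as "true", "enable" or "on" are all
# accepted by the toBool helper introduced above.
traefik \
  --entryPoints='Name:https Address::443 TLS Compress:true ProxyProtocol:on' \
  --entryPoints='Name:http Address::80 Redirect.EntryPoint:https'
```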


@@ -0,0 +1,201 @@
package configuration
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func Test_parseEntryPointsConfiguration(t *testing.T) {
testCases := []struct {
name string
value string
expectedResult map[string]string
}{
{
name: "all parameters",
value: "Name:foo Address:bar TLS:goo TLS CA:car Redirect.EntryPoint:RedirectEntryPoint Redirect.Regex:RedirectRegex Redirect.Replacement:RedirectReplacement Compress:true WhiteListSourceRange:WhiteListSourceRange ProxyProtocol:true",
expectedResult: map[string]string{
"Name": "foo",
"Address": "bar",
"CA": "car",
"TLS": "goo",
"TLSACME": "TLS",
"RedirectEntryPoint": "RedirectEntryPoint",
"RedirectRegex": "RedirectRegex",
"RedirectReplacement": "RedirectReplacement",
"WhiteListSourceRange": "WhiteListSourceRange",
"ProxyProtocol": "true",
"Compress": "true",
},
},
{
name: "proxy protocol on",
value: "Name:foo ProxyProtocol:on",
expectedResult: map[string]string{
"Name": "foo",
"ProxyProtocol": "on",
},
},
{
name: "compress on",
value: "Name:foo Compress:on",
expectedResult: map[string]string{
"Name": "foo",
"Compress": "on",
},
},
{
name: "TLS",
value: "Name:foo TLS:goo TLS",
expectedResult: map[string]string{
"Name": "foo",
"TLS": "goo",
"TLSACME": "TLS",
},
},
}
for _, test := range testCases {
test := test
t.Run(test.name, func(t *testing.T) {
t.Parallel()
conf, err := parseEntryPointsConfiguration(test.value)
if err != nil {
t.Error(err)
}
for key, value := range conf {
fmt.Println(key, value)
}
assert.Len(t, conf, len(test.expectedResult))
assert.Equal(t, test.expectedResult, conf)
})
}
}
func Test_toBool(t *testing.T) {
testCases := []struct {
name string
value string
key string
expectedBool bool
}{
{
name: "on",
value: "on",
key: "foo",
expectedBool: true,
},
{
name: "true",
value: "true",
key: "foo",
expectedBool: true,
},
{
name: "enable",
value: "enable",
key: "foo",
expectedBool: true,
},
{
name: "arbitrary string",
value: "bar",
key: "foo",
expectedBool: false,
},
{
name: "no existing entry",
value: "bar",
key: "fii",
expectedBool: false,
},
}
for _, test := range testCases {
test := test
t.Run(test.name, func(t *testing.T) {
t.Parallel()
conf := map[string]string{
"foo": test.value,
}
result := toBool(conf, test.key)
assert.Equal(t, test.expectedBool, result)
})
}
}
func TestEntryPoints_Set(t *testing.T) {
testCases := []struct {
name string
expression string
expectedEntryPointName string
expectedEntryPoint *EntryPoint
}{
{
name: "all parameters",
expression: "Name:foo Address:bar TLS:goo,gii TLS CA:car Redirect.EntryPoint:RedirectEntryPoint Redirect.Regex:RedirectRegex Redirect.Replacement:RedirectReplacement Compress:true WhiteListSourceRange:Range ProxyProtocol:true",
expectedEntryPointName: "foo",
expectedEntryPoint: &EntryPoint{
Address: "bar",
Redirect: &Redirect{
EntryPoint: "RedirectEntryPoint",
Regex: "RedirectRegex",
Replacement: "RedirectReplacement",
},
Compress: true,
ProxyProtocol: true,
WhitelistSourceRange: []string{"Range"},
TLS: &TLS{
ClientCAFiles: []string{"car"},
Certificates: Certificates{
{
CertFile: FileOrContent("goo"),
KeyFile: FileOrContent("gii"),
},
},
},
},
},
{
name: "compress on",
expression: "Name:foo Compress:on",
expectedEntryPointName: "foo",
expectedEntryPoint: &EntryPoint{
Compress: true,
WhitelistSourceRange: []string{},
},
},
{
name: "compress true",
expression: "Name:foo Compress:true",
expectedEntryPointName: "foo",
expectedEntryPoint: &EntryPoint{
Compress: true,
WhitelistSourceRange: []string{},
},
},
}
for _, test := range testCases {
test := test
t.Run(test.name, func(t *testing.T) {
t.Parallel()
eps := EntryPoints{}
err := eps.Set(test.expression)
require.NoError(t, err)
ep := eps[test.expectedEntryPointName]
assert.EqualValues(t, test.expectedEntryPoint, ep)
})
}
}


@@ -2,7 +2,7 @@
## Concepts ## Concepts
Let's take our example from the [overview](https://docs.traefik.io/#overview) again: Let's take our example from the [overview](/#overview) again:
> Imagine that you have deployed a bunch of microservices on your infrastructure. You probably used a service registry (like etcd or consul) and/or an orchestrator (swarm, Mesos/Marathon) to manage all these services. > Imagine that you have deployed a bunch of microservices on your infrastructure. You probably used a service registry (like etcd or consul) and/or an orchestrator (swarm, Mesos/Marathon) to manage all these services.
@@ -185,6 +185,7 @@ In TOML file, you can use multiple routes:
``` ```
Here `frontend3` will forward the traffic to the `backend2` if the rules `Host:test3.localhost` **AND** `Path:/test` are matched. Here `frontend3` will forward the traffic to the `backend2` if the rules `Host:test3.localhost` **AND** `Path:/test` are matched.
You can also use the notation using a `;` separator, same result: You can also use the notation using a `;` separator, same result:
```toml ```toml
@@ -208,7 +209,8 @@ Finally, you can create a rule to bind multiple domains or Path to a frontend, u
#### Rules Order #### Rules Order
When combining `Modifier` rules with `Matcher` rules, it is important to remember that `Modifier` rules **ALWAYS** apply after the `Matcher` rules. When combining `Modifier` rules with `Matcher` rules, it is important to remember that `Modifier` rules **ALWAYS** apply after the `Matcher` rules.
The following rules are both `Matchers` and `Modifiers`, so the `Matcher` portion of the rule will apply first, and the `Modifier` will apply later. The following rules are both `Matchers` and `Modifiers`, so the `Matcher` portion of the rule will apply first, and the `Modifier` will apply later.
- `PathStrip` - `PathStrip`
@@ -252,7 +254,8 @@ Here, `frontend1` will be matched before `frontend2` (`10 > 5`).
#### Custom headers #### Custom headers
Custom headers can be configured through the frontends, to add headers to either requests or responses that match the frontend's rules.
This allows for setting headers such as `X-Script-Name` to be added to the request, or custom headers to be added to the response.
```toml ```toml
[frontends] [frontends]
@@ -270,7 +273,10 @@ In this example, all matches to the path `/cheese` will have the `X-Script-Name`
#### Security headers #### Security headers
Security related headers (HSTS headers, SSL redirection, Browser XSS filter, etc) can be added and configured per frontend in a similar manner to the custom headers above.
This functionality allows for some easy security features to quickly be set.
An example of some of the security headers:
```toml ```toml
[frontends] [frontends]
@@ -290,7 +296,8 @@ Security related headers (HSTS headers, SSL redirection, Browser XSS filter, etc
In this example, traffic routed through the first frontend will have the `X-Frame-Options` header set to `DENY`, and the second will only allow HTTPS request through, otherwise will return a 301 HTTPS redirect. In this example, traffic routed through the first frontend will have the `X-Frame-Options` header set to `DENY`, and the second will only allow HTTPS request through, otherwise will return a 301 HTTPS redirect.
!!! note
The detailed documentation for those security headers can be found in [unrolled/secure](https://github.com/unrolled/secure#available-options).
#### Rate limiting #### Rate limiting
@@ -324,10 +331,12 @@ These can "burst" up to 10 and 200 in each period respectively.
### Backends ### Backends
A backend is responsible to load-balance the traffic coming from one or more frontends to a set of http servers. A backend is responsible to load-balance the traffic coming from one or more frontends to a set of http servers.
Various methods of load-balancing are supported: Various methods of load-balancing are supported:
- `wrr`: Weighted Round Robin - `wrr`: Weighted Round Robin
- `drr`: Dynamic Round Robin: increases weights on servers that perform better than others.
It also rolls back to original weights if the servers have changed.
A circuit breaker can also be applied to a backend, preventing high loads on failing servers. A circuit breaker can also be applied to a backend, preventing high loads on failing servers.
Initial state is Standby. CB observes the statistics and does not modify the request. Initial state is Standby. CB observes the statistics and does not modify the request.
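To make the two settings above concrete, a backend declaration might combine a load-balancing method with a circuit breaker roughly like this (the backend name and the breaker expression are only examples):

```toml
[backends]
  [backends.backend1]
    # Pick one of the methods listed above: "wrr" (the default) or "drr".
    [backends.backend1.loadbalancer]
      method = "drr"
    # Trip the breaker when more than half of the requests hit network errors.
    [backends.backend1.circuitbreaker]
      expression = "NetworkErrorRatio() > 0.5"
```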
@@ -366,9 +375,10 @@ For example:
- Another possible value for `extractorfunc` is `client.ip` which will categorize requests based on client source ip. - Another possible value for `extractorfunc` is `client.ip` which will categorize requests based on client source ip.
- Lastly `extractorfunc` can take the value of `request.header.ANY_HEADER` which will categorize requests based on `ANY_HEADER` that you provide. - Lastly `extractorfunc` can take the value of `request.header.ANY_HEADER` which will categorize requests based on `ANY_HEADER` that you provide.
Sticky sessions are supported with both load balancers.
When sticky sessions are enabled, a cookie called `_TRAEFIK_BACKEND` is set on the initial request.
On subsequent requests, the client will be directed to the backend stored in the cookie if it is still healthy.
If not, a new backend will be assigned.
For example: For example:
```toml ```toml
@@ -378,12 +388,9 @@ For example:
sticky = true sticky = true
``` ```
A health check can be configured in order to remove a backend from LB rotation as long as it keeps returning HTTP status codes other than `200 OK` to HTTP GET requests periodically carried out by Traefik.
The check is defined by a path appended to the backend URL and an interval (given in a format understood by [time.ParseDuration](https://golang.org/pkg/time/#ParseDuration)) specifying how often the health check should be executed (the default being 30 seconds).
Each backend must respond to the health check within 5 seconds.
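A minimal sketch of such a health check block (the backend name and path are placeholders):

```toml
[backends]
  [backends.backend1]
    [backends.backend1.healthcheck]
      # GET <server URL>/health every 10 seconds; a non-200 answer removes
      # the server from rotation until it recovers.
      # (A dedicated port for the check can also be set, as noted below.)
      path = "/health"
      interval = "10s"
```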
By default, the port of the backend server is used, however, this may be overridden. By default, the port of the backend server is used, however, this may be overridden.
A recovering backend returning 200 OK responses again is being returned to the A recovering backend returning 200 OK responses again is being returned to the
@@ -466,7 +473,9 @@ Each item takes precedence over the item below it:
It means that arguments override configuration file, and key-value store overrides arguments. It means that arguments override configuration file, and key-value store overrides arguments.
!!! note
The provider-enabling argument parameters (e.g., `--docker`) set all default values for the specific provider.
It must not be used if a configuration source with less precedence wants to set a non-default provider value.
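As an illustration of that warning (the flags shown are real, the file name and scenario are hypothetical):

```bash
# traefik.toml contains a customised [docker] section (e.g. a non-default endpoint).
traefik --configFile=traefik.toml            # fine: the file's Docker settings are used
traefik --configFile=traefik.toml --docker   # risky: --docker re-applies all Docker provider
                                             # defaults, overriding the file's non-default values
```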
#### Configuration file #### Configuration file
@@ -532,18 +541,19 @@ traefik [command] [--flag=flag_argument]
List of Træfik available commands with description : List of Træfik available commands with description :
- `version` : Print version - `version` : Print version
- `storeconfig` : Store the static traefik configuration into a Key-value store. Please refer to the [Store Træfik configuration](/user-guide/kv-config/#store-trfk-configuration) section to get documentation on it. - `storeconfig` : Store the static Traefik configuration into a Key-value store. Please refer to the [Store Træfik configuration](/user-guide/kv-config/#store-trfk-configuration) section to get documentation on it.
- `bug`: The easiest way to submit a pre-filled issue. - `bug`: The easiest way to submit a pre-filled issue.
- `healthcheck`: Calls traefik `/ping` to check health. - `healthcheck`: Calls Traefik `/ping` to check health.
Each command may have related flags. Each command may have related flags.
All those related flags will be displayed with : All those related flags will be displayed with :
```bash ```bash
traefik [command] --help traefik [command] --help
``` ```
Note that each command is described at the beginning of the help section: Each command is described at the beginning of the help section:
```bash ```bash
traefik --help traefik --help
@@ -557,16 +567,20 @@ Here is the easiest way to submit a pre-filled issue on [Træfik GitHub](https:/
traefik bug traefik bug
``` ```
See https://www.youtube.com/watch?v=Lyz62L8m93I. Watch [this demo](https://www.youtube.com/watch?v=Lyz62L8m93I).
### Command: healthcheck ### Command: healthcheck
This command allows to check the health of Traefik. Its exit status is `0` if Traefik is healthy and `1` if it is unhealthy. This command allows to check the health of Traefik. Its exit status is `0` if Traefik is healthy and `1` if it is unhealthy.
This can be used with Docker [HEALTHCHECK](https://docs.docker.com/engine/reference/builder/#healthcheck) instruction or any other health check orchestration mechanism. This can be used with Docker [HEALTHCHECK](https://docs.docker.com/engine/reference/builder/#healthcheck) instruction or any other health check orchestration mechanism.
Note: the `web` provider must be enabled to allow `/ping` calls by the `healthcheck` command. !!! note
The [`web` provider](/configuration/backends/web) must be enabled to allow `/ping` calls by the `healthcheck` command.
```bash ```bash
$ traefik healthcheck traefik healthcheck
```
```bash
OK: http://:8082/ping OK: http://:8082/ping
``` ```
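When combining this with the Docker `HEALTHCHECK` instruction mentioned above, a Dockerfile line along the following lines is a reasonable sketch (interval, timeout and retry values are arbitrary):

```
# Dockerfile excerpt (sketch): let Docker probe Traefik with its own healthcheck command.
HEALTHCHECK --interval=10s --timeout=5s --retries=3 CMD ["traefik", "healthcheck"]
```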


@@ -14,7 +14,7 @@ I used 4 VMs for the tests with the following configuration:
## Setup ## Setup
1. One VM used to launch the benchmarking tool [wrk](https://github.com/wg/wrk) 1. One VM used to launch the benchmarking tool [wrk](https://github.com/wg/wrk)
2. One VM for traefik (v1.0.0-beta.416) / nginx (v1.4.6) 2. One VM for Traefik (v1.0.0-beta.416) / nginx (v1.4.6)
3. Two VMs for 2 backend servers in go [whoami](https://github.com/emilevauge/whoamI/) 3. Two VMs for 2 backend servers in go [whoami](https://github.com/emilevauge/whoamI/)
Each VM has been tuned using the following limits: Each VM has been tuned using the following limits:
@@ -182,7 +182,8 @@ Requests/sec: 33591.67
Transfer/sec: 4.97MB Transfer/sec: 4.97MB
``` ```
### traefik: ### Traefik:
```shell ```shell
wrk -t20 -c1000 -d60s -H "Host: test.traefik" --latency http://IP-traefik:8000/bench wrk -t20 -c1000 -d60s -H "Host: test.traefik" --latency http://IP-traefik:8000/bench
Running 1m test @ http://IP-traefik:8000/bench Running 1m test @ http://IP-traefik:8000/bench
@@ -209,5 +210,5 @@ Not bad for young project :) !
Some areas of possible improvements: Some areas of possible improvements:
- Use [GO_REUSEPORT](https://github.com/kavu/go_reuseport) listener - Use [GO_REUSEPORT](https://github.com/kavu/go_reuseport) listener
- Run a separate server instance per CPU core with `GOMAXPROCS=1` (it appears during benchmarks that there is a lot more context switches with traefik than with nginx) - Run a separate server instance per CPU core with `GOMAXPROCS=1` (it appears during benchmarks that there is a lot more context switches with Traefik than with nginx)


@@ -1,33 +1,31 @@
## ACME (Let's Encrypt) configuration # ACME (Let's Encrypt) configuration
See also [Let's Encrypt examples](/user-guide/examples/#lets-encrypt-support) and [Docker & Let's Encrypt user guide](/user-guide/docker-and-lets-encrypt).
## Configuration
```toml ```toml
# Sample entrypoint configuration when using ACME # Sample entrypoint configuration when using ACME.
[entryPoints] [entryPoints]
[entryPoints.https] [entryPoints.https]
address = ":443" address = ":443"
[entryPoints.https.tls] [entryPoints.https.tls]
# Enable ACME (Let's Encrypt): automatic SSL # Enable ACME (Let's Encrypt): automatic SSL.
[acme] [acme]
# Email address used for registration # Email address used for registration.
# #
# Required # Required
# #
email = "test@traefik.io" email = "test@traefik.io"
# File or key used for certificates storage. # File or key used for certificates storage.
# WARNING, if you use Traefik in Docker, you have 2 options:
# - create a file on your host and mount it as a volume
# storageFile = "acme.json"
# $ docker run -v "/my/host/acme.json:acme.json" traefik
# - mount the folder containing the file as a volume
# storageFile = "/etc/traefik/acme/acme.json"
# $ docker run -v "/my/host/acme:/etc/traefik/acme" traefik
# #
# Required # Required
# #
storage = "acme.json" # or "traefik/acme/account" if using KV store storage = "acme.json"
# or `storage = "traefik/acme/account"` if using KV store.
# Entrypoint to proxy acme challenge/apply certificates to. # Entrypoint to proxy acme challenge/apply certificates to.
# WARNING, must point to an entrypoint on port 443 # WARNING, must point to an entrypoint on port 443
@@ -36,71 +34,49 @@ storage = "acme.json" # or "traefik/acme/account" if using KV store
# #
entryPoint = "https" entryPoint = "https"
# Use a DNS based acme challenge rather than external HTTPS access, e.g. for a firewalled server # Use a DNS based acme challenge rather than external HTTPS access
# Select the provider that matches the DNS domain that will host the challenge TXT record, #
# and provide environment variables with access keys to enable setting it:
# - cloudflare: CLOUDFLARE_EMAIL, CLOUDFLARE_API_KEY
# - digitalocean: DO_AUTH_TOKEN
# - dnsimple: DNSIMPLE_EMAIL, DNSIMPLE_OAUTH_TOKEN
# - dnsmadeeasy: DNSMADEEASY_API_KEY, DNSMADEEASY_API_SECRET
# - exoscale: EXOSCALE_API_KEY, EXOSCALE_API_SECRET
# - gandi: GANDI_API_KEY
# - linode: LINODE_API_KEY
# - manual: none, but run traefik interactively & turn on acmeLogging to see instructions & press Enter
# - namecheap: NAMECHEAP_API_USER, NAMECHEAP_API_KEY
# - rfc2136: RFC2136_TSIG_KEY, RFC2136_TSIG_SECRET, RFC2136_TSIG_ALGORITHM, RFC2136_NAMESERVER
# - route53: AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_REGION, or configured user/instance IAM profile
# - dyn: DYN_CUSTOMER_NAME, DYN_USER_NAME, DYN_PASSWORD
# - vultr: VULTR_API_KEY
# - ovh: OVH_ENDPOINT, OVH_APPLICATION_KEY, OVH_APPLICATION_SECRET, OVH_CONSUMER_KEY
# - pdns: PDNS_API_KEY, PDNS_API_URL
# #
# Optional # Optional
# #
# dnsProvider = "digitalocean" # dnsProvider = "digitalocean"
# By default, the dnsProvider will verify the TXT DNS challenge record before letting ACME verify # By default, the dnsProvider will verify the TXT DNS challenge record before letting ACME verify.
# If delayDontCheckDNS is greater than zero, avoid this & instead just wait so many seconds. # If delayDontCheckDNS is greater than zero, avoid this & instead just wait so many seconds.
# Useful if internal networks block external DNS queries # Useful if internal networks block external DNS queries.
# #
# Optional # Optional
# #
# delayDontCheckDNS = 0 # delayDontCheckDNS = 0
# If true, display debug log messages from the acme client library # If true, display debug log messages from the acme client library.
# #
# Optional # Optional
# #
# acmeLogging = true # acmeLogging = true
# Enable on demand certificate. This will request a certificate from Let's Encrypt during the first TLS handshake for a hostname that does not yet have a certificate. # Enable on demand certificate.
# WARNING, TLS handshakes will be slow when requesting a hostname certificate for the first time, this can leads to DoS attacks.
# WARNING, Take note that Let's Encrypt have rate limiting: https://letsencrypt.org/docs/rate-limits
# #
# Optional # Optional
# #
# onDemand = true # onDemand = true
# Enable certificate generation on frontends Host rules. This will request a certificate from Let's Encrypt for each frontend with a Host rule. # Enable certificate generation on frontends Host rules.
# For example, a rule Host:test1.traefik.io,test2.traefik.io will request a certificate with main domain test1.traefik.io and SAN test2.traefik.io.
# #
# Optional # Optional
# #
# OnHostRule = true # onHostRule = true
# CA server to use # CA server to use.
# Uncomment the line to run on the staging let's encrypt server # - Uncomment the line to run on the staging let's encrypt server.
# Leave comment to go to prod # - Leave comment to go to prod.
# #
# Optional # Optional
# #
# caServer = "https://acme-staging.api.letsencrypt.org/directory" # caServer = "https://acme-staging.api.letsencrypt.org/directory"
# Domains list # Domains list.
# You can provide SANs (alternative domains) to each main domain #
# All domains must have A/AAAA records pointing to Traefik
# WARNING, Take note that Let's Encrypt have rate limiting: https://letsencrypt.org/docs/rate-limits
# Each domain & SANs will lead to a certificate request.
# [[acme.domains]] # [[acme.domains]]
# main = "local1.com" # main = "local1.com"
# sans = ["test1.local1.com", "test2.local1.com"] # sans = ["test1.local1.com", "test2.local1.com"]
@@ -112,3 +88,151 @@ entryPoint = "https"
# [[acme.domains]] # [[acme.domains]]
# main = "local4.com" # main = "local4.com"
``` ```
### `storage`
```toml
[acme]
# ...
storage = "acme.json"
# ...
```
File or key used for certificates storage.
**WARNING** If you use Traefik in Docker, you have 2 options:
- create a file on your host and mount it as a volume:
```toml
storage = "acme.json"
```
```bash
docker run -v "/my/host/acme.json:acme.json" traefik
```
- mount the folder containing the file as a volume
```toml
storage = "/etc/traefik/acme/acme.json"
```
```bash
docker run -v "/my/host/acme:/etc/traefik/acme" traefik
```
### `dnsProvider`
```toml
[acme]
# ...
dnsProvider = "digitalocean"
# ...
```
Use a DNS based acme challenge rather than external HTTPS access, e.g. for a firewalled server.
Select the provider that matches the DNS domain that will host the challenge TXT record, and provide environment variables with access keys to enable setting it:
| Provider | Configuration |
|----------------------------------------------|-----------------------------------------------------------------------------------------------------------|
| [Cloudflare](https://www.cloudflare.com) | `CLOUDFLARE_EMAIL`, `CLOUDFLARE_API_KEY` |
| [DigitalOcean](https://www.digitalocean.com) | `DO_AUTH_TOKEN` |
| [DNSimple](https://dnsimple.com) | `DNSIMPLE_EMAIL`, `DNSIMPLE_OAUTH_TOKEN` |
| [DNS Made Easy](https://dnsmadeeasy.com) | `DNSMADEEASY_API_KEY`, `DNSMADEEASY_API_SECRET` |
| [Exoscale](https://www.exoscale.ch) | `EXOSCALE_API_KEY`, `EXOSCALE_API_SECRET` |
| [Gandi](https://www.gandi.net) | `GANDI_API_KEY` |
| [Linode](https://www.linode.com) | `LINODE_API_KEY` |
| manual | none, but run Traefik interactively & turn on `acmeLogging` to see instructions & press <kbd>Enter</kbd>. |
| [Namecheap](https://www.namecheap.com) | `NAMECHEAP_API_USER`, `NAMECHEAP_API_KEY` |
| RFC2136 | `RFC2136_TSIG_KEY`, `RFC2136_TSIG_SECRET`, `RFC2136_TSIG_ALGORITHM`, `RFC2136_NAMESERVER` |
| [Route 53](https://aws.amazon.com/route53/) | `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, `AWS_REGION`, or configured user/instance IAM profile. |
| [dyn](https://dyn.com) | `DYN_CUSTOMER_NAME`, `DYN_USER_NAME`, `DYN_PASSWORD` |
| [VULTR](https://www.vultr.com) | `VULTR_API_KEY` |
| [OVH](https://www.ovh.com) | `OVH_ENDPOINT`, `OVH_APPLICATION_KEY`, `OVH_APPLICATION_SECRET`, `OVH_CONSUMER_KEY` |
| [pdns](https://www.powerdns.com) | `PDNS_API_KEY`, `PDNS_API_URL` |
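Picking one row from the table above, a DigitalOcean-based setup could look roughly like this (the token is a placeholder and the file name is arbitrary):

```bash
# Credentials for the chosen provider are passed as environment variables...
export DO_AUTH_TOKEN="xxxxxxxxxxxxxxxxxxxx"
# ...while the provider itself is selected in the [acme] section of traefik.toml:
#   dnsProvider = "digitalocean"
traefik --configFile=traefik.toml
```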
### `delayDontCheckDNS`
```toml
[acme]
# ...
delayDontCheckDNS = 0
# ...
```
By default, the dnsProvider will verify the TXT DNS challenge record before letting ACME verify.
If `delayDontCheckDNS` is greater than zero, avoid this & instead just wait so many seconds.
Useful if internal networks block external DNS queries.
### `onDemand`
```toml
[acme]
# ...
onDemand = true
# ...
```
Enable on demand certificate.
This will request a certificate from Let's Encrypt during the first TLS handshake for a hostname that does not yet have a certificate.
!!! warning
TLS handshakes will be slow when requesting a hostname certificate for the first time, this can lead to DoS attacks.
!!! warning
Take note that Let's Encrypt has [rate limiting](https://letsencrypt.org/docs/rate-limits).
### `onHostRule`
```toml
[acme]
# ...
onHostRule = true
# ...
```
Enable certificate generation on frontends Host rules.
This will request a certificate from Let's Encrypt for each frontend with a Host rule.
For example, a rule `Host:test1.traefik.io,test2.traefik.io` will request a certificate with main domain `test1.traefik.io` and SAN `test2.traefik.io`.
### `caServer`
```toml
[acme]
# ...
caServer = "https://acme-staging.api.letsencrypt.org/directory"
# ...
```
CA server to use.
- Uncomment the line to run on the staging Let's Encrypt server.
- Leave it commented to use the production server.
### `domains`
```toml
[acme]
# ...
[[acme.domains]]
main = "local1.com"
sans = ["test1.local1.com", "test2.local1.com"]
[[acme.domains]]
main = "local2.com"
sans = ["test1.local2.com", "test2.local2.com"]
[[acme.domains]]
main = "local3.com"
[[acme.domains]]
main = "local4.com"
# ...
```
You can provide SANs (alternative domains) to each main domain.
All domains must have A/AAAA records pointing to Traefik.
!!! warning
Take note that Let's Encrypt has [rate limiting](https://letsencrypt.org/docs/rate-limits).
Each domain & SANs will lead to a certificate request.


@@ -1,36 +1,59 @@
# BoltDB Backend # BoltDB Backend
Træfik can be configured to use BoltDB as a backend configuration: Træfik can be configured to use BoltDB as a backend configuration.
```toml ```toml
################################################################ ################################################################
# BoltDB configuration backend # BoltDB configuration backend
################################################################ ################################################################
# Enable BoltDB configuration backend # Enable BoltDB configuration backend.
[boltdb] [boltdb]
# BoltDB file # BoltDB file.
# #
# Required # Required
# Default: "127.0.0.1:4001"
# #
endpoint = "/my.db" endpoint = "/my.db"
# Enable watch BoltDB changes # Enable watch BoltDB changes.
# #
# Optional # Optional
# Default: true
# #
watch = true watch = true
# Prefix used for KV store. # Prefix used for KV store.
# #
# Optional # Optional
# Default: "/traefik"
# #
prefix = "/traefik" prefix = "/traefik"
# Override default configuration template. For advanced users :) # Override default configuration template.
# For advanced users :)
# #
# Optional # Optional
# #
filename = "boltdb.tmpl" filename = "boltdb.tmpl"
# Use BoltDB user/pass authentication.
#
# Optional
#
# username = foo
# password = bar
# Enable BoltDB TLS connection.
#
# Optional
#
# [boltdb.tls]
# ca = "/etc/ssl/ca.crt"
# cert = "/etc/ssl/boltdb.crt"
# key = "/etc/ssl/boltdb.key"
# insecureskipverify = true
``` ```
To enable constraints see [backend-specific constraints section](/configuration/commons/#backend-specific).


@@ -2,90 +2,101 @@
## Consul Key-Value backend ## Consul Key-Value backend
Træfik can be configured to use Consul as a backend configuration: Træfik can be configured to use Consul as a backend configuration.
```toml ```toml
################################################################ ################################################################
# Consul KV configuration backend # Consul KV configuration backend
################################################################ ################################################################
# Enable Consul KV configuration backend # Enable Consul KV configuration backend.
[consul] [consul]
# Consul server endpoint # Consul server endpoint.
# #
# Required # Required
# Default: "127.0.0.1:8500"
# #
endpoint = "127.0.0.1:8500" endpoint = "127.0.0.1:8500"
# Enable watch Consul changes # Enable watch Consul changes.
# #
# Optional # Optional
# Default: true
# #
watch = true watch = true
# Prefix used for KV store. # Prefix used for KV store.
# #
# Optional # Optional
# Default: traefik
# #
prefix = "traefik" prefix = "traefik"
# Override default configuration template. For advanced users :) # Override default configuration template.
# For advanced users :)
# #
# Optional # Optional
# #
# filename = "consul.tmpl" # filename = "consul.tmpl"
# Enable consul TLS connection # Use Consul user/pass authentication.
# #
# Optional # Optional
# #
# [consul.tls] # username = foo
# ca = "/etc/ssl/ca.crt" # password = bar
# cert = "/etc/ssl/consul.crt"
# key = "/etc/ssl/consul.key" # Enable Consul TLS connection.
# insecureskipverify = true #
# Optional
#
# [consul.tls]
# ca = "/etc/ssl/ca.crt"
# cert = "/etc/ssl/consul.crt"
# key = "/etc/ssl/consul.key"
# insecureskipverify = true
``` ```
To enable constraints see [backend-specific constraints section](/configuration/commons/#backend-specific).
Please refer to the [Key Value storage structure](/user-guide/kv-config/#key-value-storage-structure) section to get documentation on Traefik KV structure.
## Consul Catalog backend
Træfik can be configured to use service discovery catalog of Consul as a backend configuration.
```toml ```toml
################################################################ ################################################################
# Consul Catalog configuration backend # Consul Catalog configuration backend
################################################################ ################################################################
# Enable Consul Catalog configuration backend # Enable Consul Catalog configuration backend.
[consulCatalog] [consulCatalog]
# Consul server endpoint # Consul server endpoint.
# #
# Required # Required
# Default: "127.0.0.1:8500"
# #
endpoint = "127.0.0.1:8500" endpoint = "127.0.0.1:8500"
# Default domain used. # Expose Consul catalog services by default in Traefik.
#
# Optional
#
domain = "consul.localhost"
# Expose Consul catalog services by default in traefik
# #
# Optional # Optional
# Default: true
# #
exposedByDefault = false exposedByDefault = false
# Prefix for Consul catalog tags # Prefix for Consul catalog tags.
# #
# Optional # Optional
# Default: "traefik"
# #
prefix = "traefik" prefix = "traefik"
# Default frontEnd Rule for Consul services # Default frontEnd Rule for Consul services.
# #
# The format is a Go Template with: # The format is a Go Template with:
# - ".ServiceName", ".Domain" and ".Attributes" available # - ".ServiceName", ".Domain" and ".Attributes" available
@@ -93,13 +104,18 @@ prefix = "traefik"
# - "getAttribute(...)" function uses prefixed tag names based on "prefix" value # - "getAttribute(...)" function uses prefixed tag names based on "prefix" value
# #
# Optional # Optional
# Default: "Host:{{.ServiceName}}.{{.Domain}}"
# #
#frontEndRule = "Host:{{.ServiceName}}.{{Domain}}" #frontEndRule = "Host:{{.ServiceName}}.{{Domain}}"
``` ```
This backend will create routes matching on hostname based on the service name used in consul. This backend will create routes matching on hostname based on the service name used in Consul.
To enable constraints see [backend-specific constraints section](/configuration/commons/#backend-specific).
### Tags
Additional settings can be defined using Consul Catalog tags.
| Tag | Description | | Tag | Description |
|---------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| |---------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|


@@ -9,7 +9,7 @@ Træfik can be configured to use Docker as a backend configuration.
# Docker configuration backend # Docker configuration backend
################################################################ ################################################################
# Enable Docker configuration backend # Enable Docker configuration backend.
[docker] [docker]
# Docker server endpoint. Can be a tcp or a unix socket endpoint. # Docker server endpoint. Can be a tcp or a unix socket endpoint.
@@ -25,41 +25,43 @@ endpoint = "unix:///var/run/docker.sock"
# #
domain = "docker.localhost" domain = "docker.localhost"
# Enable watch docker changes # Enable watch docker changes.
# #
# Optional # Optional
# #
watch = true watch = true
# Override default configuration template. For advanced users :) # Override default configuration template.
# For advanced users :)
# #
# Optional # Optional
# #
# filename = "docker.tmpl" # filename = "docker.tmpl"
# Expose containers by default in traefik # Expose containers by default in Traefik.
# If set to false, containers that don't have `traefik.enable=true` will be ignored # If set to false, containers that don't have `traefik.enable=true` will be ignored.
# #
# Optional # Optional
# Default: true # Default: true
# #
exposedbydefault = true exposedbydefault = true
# Use the IP address from the binded port instead of the inner network one. For specific use-case :) # Use the IP address from the binded port instead of the inner network one.
# For specific use-case :)
# #
# Optional # Optional
# Default: false # Default: false
# #
usebindportip = true usebindportip = true
# Use Swarm Mode services as data provider # Use Swarm Mode services as data provider.
# #
# Optional # Optional
# Default: false # Default: false
# #
swarmmode = false swarmmode = false
# Enable docker TLS connection # Enable docker TLS connection.
# #
# Optional # Optional
# #
@@ -70,6 +72,9 @@ swarmmode = false
# insecureskipverify = true # insecureskipverify = true
``` ```
To enable constraints see [backend-specific constraints section](/configuration/commons/#backend-specific).
## Docker Swarm Mode ## Docker Swarm Mode
```toml ```toml
@@ -77,10 +82,11 @@ swarmmode = false
# Docker Swarmmode configuration backend # Docker Swarmmode configuration backend
################################################################ ################################################################
# Enable Docker configuration backend # Enable Docker configuration backend.
[docker] [docker]
# Docker server endpoint. Can be a tcp or a unix socket endpoint. # Docker server endpoint.
# Can be a tcp or a unix socket endpoint.
# #
# Required # Required
# Default: "unix:///var/run/docker.sock" # Default: "unix:///var/run/docker.sock"
@@ -95,40 +101,52 @@ endpoint = "tcp://127.0.0.1:2375"
# #
domain = "docker.localhost" domain = "docker.localhost"
# Enable watch docker changes # Enable watch docker changes.
# #
# Optional # Optional
# Default: true
# #
watch = true watch = true
# Use Docker Swarm Mode as data provider # Use Docker Swarm Mode as data provider.
#
# Optional
# Default: false
#
swarmmode = true swarmmode = true
# Override default configuration template. For advanced users :) # Override default configuration template.
# For advanced users :)
# #
# Optional # Optional
# #
# filename = "docker.tmpl" # filename = "docker.tmpl"
# Expose services by default in traefik # Expose services by default in Traefik.
# #
# Optional # Optional
# Default: true # Default: true
# #
exposedbydefault = false exposedbydefault = false
# Enable docker TLS connection # Enable docker TLS connection.
# #
# Optional # Optional
# #
# [swarm.tls] # [docker.tls]
# ca = "/etc/ssl/ca.crt" # ca = "/etc/ssl/ca.crt"
# cert = "/etc/ssl/docker.crt" # cert = "/etc/ssl/docker.crt"
# key = "/etc/ssl/docker.key" # key = "/etc/ssl/docker.key"
# insecureskipverify = true # insecureskipverify = true
``` ```
To enable constraints see [backend-specific constraints section](/configuration/commons/#backend-specific).
## Labels: overriding default behaviour
### On Containers
Labels can be used on containers to override default behaviour.
| Label | Description | | Label | Description |
|---------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| |---------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
@@ -149,9 +167,11 @@ exposedbydefault = false
| `traefik.frontend.entryPoints=http,https` | Assign this frontend to entry points `http` and `https`. Overrides `defaultEntryPoints` | | `traefik.frontend.entryPoints=http,https` | Assign this frontend to entry points `http` and `https`. Overrides `defaultEntryPoints` |
| `traefik.frontend.auth.basic=EXPR` | Sets basic authentication for that frontend in CSV format: `User:Hash,User:Hash` | | `traefik.frontend.auth.basic=EXPR` | Sets basic authentication for that frontend in CSV format: `User:Hash,User:Hash` |
| `traefik.frontend.whitelistSourceRange:RANGE` | List of IP-Ranges which are allowed to access. An unset or empty list allows all Source-IPs to access. If one of the Net-Specifications are invalid, the whole list is invalid and allows all Source-IPs to access. | | `traefik.frontend.whitelistSourceRange:RANGE` | List of IP-Ranges which are allowed to access. An unset or empty list allows all Source-IPs to access. If one of the Net-Specifications are invalid, the whole list is invalid and allows all Source-IPs to access. |
| `traefik.docker.network` | Set the docker network to use for connections to this container. If a container is linked to several networks, be sure to set the proper network name (you can check with docker inspect <container_id>) otherwise it will randomly pick one (depending on how docker is returning them). For instance when deploying docker `stack` from compose files, the compose defined networks will be prefixed with the `stack` name. | | `traefik.docker.network` | Set the docker network to use for connections to this container. If a container is linked to several networks, be sure to set the proper network name (you can check with `docker inspect <container_id>`) otherwise it will randomly pick one (depending on how docker is returning them). For instance when deploying docker `stack` from compose files, the compose defined networks will be prefixed with the `stack` name. |
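As an illustration of the container labels above, a service might be published like this (the image, rule and port are only examples):

```bash
# Hypothetical whoami container routed by Traefik via labels.
docker run -d \
  --label traefik.enable=true \
  --label traefik.port=80 \
  --label "traefik.frontend.rule=Host:whoami.docker.localhost" \
  emilevauge/whoami
```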
### On Service
Services labels can be used for overriding default behaviour.
| Label | Description | | Label | Description |
|---------------------------------------------------|--------------------------------------------------------------------------------------------------| |---------------------------------------------------|--------------------------------------------------------------------------------------------------|


@@ -1,63 +1,71 @@
# DynamoDB Backend # DynamoDB Backend
Træfik can be configured to use Amazon DynamoDB as a backend configuration: Træfik can be configured to use Amazon DynamoDB as a backend configuration.
## Configuration
```toml ```toml
################################################################ ################################################################
# DynamoDB configuration backend # DynamoDB configuration backend
################################################################ ################################################################
# Enable DynamoDB configuration backend # Enable DynamoDB configuration backend.
[dynamodb] [dynamodb]
# DynamoDB Table Name # Region to use when connecting to AWS.
#
# Optional
#
TableName = "traefik"
# Enable watch DynamoDB changes
#
# Optional
#
Watch = true
# Polling interval (in seconds)
#
# Optional
#
RefreshSeconds = 15
# Region to use when connecting to AWS
# #
# Required # Required
# #
Region = "us-west-1" region = "us-west-1"
# AccessKeyID to use when connecting to AWS # DynamoDB Table Name.
#
# Optional
# Default: "traefik"
#
tableName = "traefik"
# Enable watch DynamoDB changes.
#
# Optional
# Default: true
#
watch = true
# Polling interval (in seconds).
#
# Optional
# Default: 15
#
refreshSeconds = 15
# AccessKeyID to use when connecting to AWS.
# #
# Optional # Optional
# #
AccessKeyID = "abc" accessKeyID = "abc"
# SecretAccessKey to use when connecting to AWS # SecretAccessKey to use when connecting to AWS.
# #
# Optional # Optional
# #
SecretAccessKey = "123" secretAccessKey = "123"
# Endpoint of local dynamodb instance for testing # Endpoint of local dynamodb instance for testing?
# #
# Optional # Optional
# #
Endpoint = "http://localhost:8080" endpoint = "http://localhost:8080"
``` ```
## Table Items
Items in the `dynamodb` table must have three attributes: Items in the `dynamodb` table must have three attributes:
- `id` (string): The id is the primary key. - `id` (string): The id is the primary key.
- `name` (string): The name is used as the name of the frontend or backend. - `name` (string): The name is used as the name of the frontend or backend.
- `frontend` or `backend` (map): This attribute's structure matches exactly the structure of a Frontend or Backend type in traefik. - `frontend` or `backend` (map): This attribute's structure matches exactly the structure of a Frontend or Backend type in Traefik.
See `types/types.go` for details. See `types/types.go` for details.
The presence or absence of this attribute determines its type. The presence or absence of this attribute determines its type.
So an item should never have both a `frontend` and a `backend` attribute. So an item should never have both a `frontend` and a `backend` attribute.
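For orientation, the `backend` map stored in such an item mirrors the same structure you would write for the file provider. The sketch below shows that shape in TOML purely for illustration (the backend and server names are made up); in DynamoDB the equivalent data lives in the item as a map attribute, not as TOML.

```toml
[backends]
  [backends.backend1]
    # Each server entry holds a URL and an optional weight.
    [backends.backend1.servers.server1]
    url = "http://172.17.0.2:80"
    weight = 10
```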
@ -1,102 +1,99 @@
# ECS Backend # ECS Backend
Træfik can be configured to use Amazon ECS as a backend configuration: Træfik can be configured to use Amazon ECS as a backend configuration.
## Configuration
```toml ```toml
################################################################ ################################################################
# ECS configuration backend # ECS configuration backend
################################################################ ################################################################
# Enable ECS configuration backend # Enable ECS configuration backend.
[ecs] [ecs]
# ECS Cluster Name # ECS Cluster Name.
# #
# DEPRECATED - Please use Clusters # DEPRECATED - Please use `clusters`.
# #
Cluster = "default" cluster = "default"
# ECS Clusters Name # ECS Clusters Name.
# #
# Optional # Optional
# Default: ["default"] # Default: ["default"]
# #
Clusters = ["default"] clusters = ["default"]
# Enable watch ECS changes # Enable watch ECS changes.
# #
# Optional # Optional
# Default: true # Default: true
# #
Watch = true watch = true
# Enable auto discover ECS clusters # Default domain used.
#
# Optional
# Default: ""
#
domain = "ecs.localhost"
# Enable auto discover ECS clusters.
# #
# Optional # Optional
# Default: false # Default: false
# #
AutoDiscoverClusters = false autoDiscoverClusters = false
# Polling interval (in seconds) # Polling interval (in seconds).
# #
# Optional # Optional
# Default: 15 # Default: 15
# #
RefreshSeconds = 15 refreshSeconds = 15
# Expose ECS services by default in traefik # Expose ECS services by default in Traefik.
# #
# Optional # Optional
# Default: true # Default: true
# #
ExposedByDefault = false exposedByDefault = false
# Region to use when connecting to AWS # Region to use when connecting to AWS.
# #
# Optional # Optional
# #
Region = "us-east-1" region = "us-east-1"
# AccessKeyID to use when connecting to AWS # AccessKeyID to use when connecting to AWS.
# #
# Optional # Optional
# #
AccessKeyID = "abc" accessKeyID = "abc"
# SecretAccessKey to use when connecting to AWS # SecretAccessKey to use when connecting to AWS.
# #
# Optional # Optional
# #
SecretAccessKey = "123" secretAccessKey = "123"
# Override default configuration template. For advanced users :) # Override default configuration template.
# For advanced users :)
# #
# Optional # Optional
# #
# filename = "ecs.tmpl" # filename = "ecs.tmpl"
``` ```
Labels can be used on task containers to override default behaviour:
| Label | Description |
|---------------------------------------------------|------------------------------------------------------------------------------------------|
| `traefik.protocol=https` | override the default `http` protocol |
| `traefik.weight=10` | assign this weight to the container |
| `traefik.enable=false` | disable this container in Træfik |
| `traefik.backend.loadbalancer.method=drr` | override the default `wrr` load balancer algorithm |
| `traefik.backend.loadbalancer.sticky=true` | enable backend sticky sessions |
| `traefik.frontend.rule=Host:test.traefik.io` | override the default frontend rule (Default: `Host:{containerName}.{domain}`). |
| `traefik.frontend.passHostHeader=true` | forward client `Host` header to the backend. |
| `traefik.frontend.priority=10` | override default frontend priority |
| `traefik.frontend.entryPoints=http,https` | assign this frontend to entry points `http` and `https`. Overrides `defaultEntryPoints`. |
| `traefik.frontend.auth.basic=EXPR` | Sets basic authentication for that frontend in CSV format: `User:Hash,User:Hash` |
If `AccessKeyID`/`SecretAccessKey` is not given, credentials will be resolved in the following order: If `AccessKeyID`/`SecretAccessKey` is not given, credentials will be resolved in the following order:
- From environment variables; `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, and `AWS_SESSION_TOKEN`. - From environment variables; `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, and `AWS_SESSION_TOKEN`.
- Shared credentials, determined by `AWS_PROFILE` and `AWS_SHARED_CREDENTIALS_FILE`, defaults to `default` and `~/.aws/credentials`. - Shared credentials, determined by `AWS_PROFILE` and `AWS_SHARED_CREDENTIALS_FILE`, defaults to `default` and `~/.aws/credentials`.
- EC2 instance role or ECS task role - EC2 instance role or ECS task role
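For illustration, a configuration relying on those ambient credentials simply omits the static keys; the values below are placeholders.

```toml
[ecs]
clusters = ["default"]
region = "us-east-1"
exposedByDefault = false
# accessKeyID / secretAccessKey are intentionally omitted:
# credentials come from the environment, the shared credentials file,
# or the EC2 instance / ECS task role.
```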
## Policy
Træfik needs the following policy to read ECS information: Træfik needs the following policy to read ECS information:
```json ```json
@ -122,3 +119,20 @@ Træfik needs the following policy to read ECS information:
] ]
} }
``` ```
## Labels: overriding default behaviour
Labels can be used on task containers to override default behaviour:
| Label | Description |
|---------------------------------------------------|------------------------------------------------------------------------------------------|
| `traefik.protocol=https` | override the default `http` protocol |
| `traefik.weight=10` | assign this weight to the container |
| `traefik.enable=false` | disable this container in Træfik |
| `traefik.backend.loadbalancer.method=drr` | override the default `wrr` load balancer algorithm |
| `traefik.backend.loadbalancer.sticky=true` | enable backend sticky sessions |
| `traefik.frontend.rule=Host:test.traefik.io` | override the default frontend rule (Default: `Host:{containerName}.{domain}`). |
| `traefik.frontend.passHostHeader=true` | forward client `Host` header to the backend. |
| `traefik.frontend.priority=10` | override default frontend priority |
| `traefik.frontend.entryPoints=http,https` | assign this frontend to entry points `http` and `https`. Overrides `defaultEntryPoints`. |
| `traefik.frontend.auth.basic=EXPR` | Sets basic authentication for that frontend in CSV format: `User:Hash,User:Hash` |
@ -1,55 +1,61 @@
# Etcd Backend # Etcd Backend
Træfik can be configured to use Etcd as a backend configuration: Træfik can be configured to use Etcd as a backend configuration.
```toml ```toml
################################################################ ################################################################
# Etcd configuration backend # Etcd configuration backend
################################################################ ################################################################
# Enable Etcd configuration backend # Enable Etcd configuration backend.
[etcd] [etcd]
# Etcd server endpoint # Etcd server endpoint.
# #
# Required # Required
# Default: "127.0.0.1:2379"
# #
endpoint = "127.0.0.1:2379" endpoint = "127.0.0.1:2379"
# Enable watch Etcd changes # Enable watch Etcd changes.
# #
# Optional # Optional
# Default: true
# #
watch = true watch = true
# Prefix used for KV store. # Prefix used for KV store.
# #
# Optional # Optional
# Default: "/traefik"
# #
prefix = "/traefik" prefix = "/traefik"
# Override default configuration template. For advanced users :) # Override default configuration template.
# For advanced users :)
# #
# Optional # Optional
# #
# filename = "etcd.tmpl" # filename = "etcd.tmpl"
# Use etcd user/pass authentication # Use etcd user/pass authentication.
# #
# Optional # Optional
# #
# username = foo # username = foo
# password = bar # password = bar
# Enable etcd TLS connection # Enable etcd TLS connection.
# #
# Optional # Optional
# #
# [etcd.tls] # [etcd.tls]
# ca = "/etc/ssl/ca.crt" # ca = "/etc/ssl/ca.crt"
# cert = "/etc/ssl/etcd.crt" # cert = "/etc/ssl/etcd.crt"
# key = "/etc/ssl/etcd.key" # key = "/etc/ssl/etcd.key"
# insecureskipverify = true # insecureskipverify = true
``` ```
Please refer to the [Key Value storage structure](/user-guide/kv-config/#key-value-storage-structure) section to get documentation on traefik KV structure. To enable constraints see [backend-specific constraints section](/configuration/commons/#backend-specific).
Please refer to the [Key Value storage structure](/user-guide/kv-config/#key-value-storage-structure) section to get documentation on Traefik KV structure.
@ -1,29 +1,30 @@
# Eureka Backend # Eureka Backend
Træfik can be configured to use Eureka as a backend configuration: Træfik can be configured to use Eureka as a backend configuration.
```toml ```toml
################################################################ ################################################################
# Eureka configuration backend # Eureka configuration backend
################################################################ ################################################################
# Enable Eureka configuration backend # Enable Eureka configuration backend.
[eureka] [eureka]
# Eureka server endpoint. # Eureka server endpoint.
# endpoint := "http://my.eureka.server/eureka"
# #
# Required # Required
# #
endpoint = "http://my.eureka.server/eureka" endpoint = "http://my.eureka.server/eureka"
# Override default configuration time between refresh # Override default configuration time between refresh.
# #
# Optional # Optional
# default 30s # Default: 30s
#
delay = "1m" delay = "1m"
# Override default configuration template. For advanced users :) # Override default configuration template.
# For advanced users :)
# #
# Optional # Optional
# #
@ -1,6 +1,12 @@
# File Backends # File Backends
Like any other reverse proxy, Træfik can be configured with a file. You have three choices: Like any other reverse proxy, Træfik can be configured with a file.
You have three choices:
- [Simple](/configuration/backends/file/#simple)
- [Rules in a Separate File](/configuration/backends/file/#rules-in-a-separate-file)
- [Multiple `.toml` Files](/configuration/backends/file/#multiple-toml-files)
## Simple ## Simple
@ -145,7 +151,7 @@ filename = "rules.toml"
rule = "Path:/test" rule = "Path:/test"
``` ```
## Multiple .toml Files ## Multiple `.toml` Files
You could have multiple `.toml` files in a directory: You could have multiple `.toml` files in a directory:
@ -1,30 +1,23 @@
# Kubernetes Ingress Backend # Kubernetes Ingress Backend
Træfik can be configured to use Kubernetes Ingress as a backend configuration: Træfik can be configured to use Kubernetes Ingress as a backend configuration.
See also [Kubernetes user guide](/user-guide/kubernetes).
## Configuration
```toml ```toml
################################################################ ################################################################
# Kubernetes Ingress configuration backend # Kubernetes Ingress configuration backend
################################################################ ################################################################
# Enable Kubernetes Ingress configuration backend
# Enable Kubernetes Ingress configuration backend.
[kubernetes] [kubernetes]
# Kubernetes server endpoint # Kubernetes server endpoint.
# #
# When deployed as a replication controller in Kubernetes, Traefik will use # Optional for in-cluster configuration, required otherwise.
# the environment variables KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT
# to construct the endpoint.
# Secure token will be found in /var/run/secrets/kubernetes.io/serviceaccount/token
# and SSL CA cert in /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
#
# The endpoint may be given to override the environment variable values.
#
# When the environment variables are not found, Traefik will try to connect to
# the Kubernetes API server with an external-cluster client. In this case, the
# endpoint is required. Specifically, it may be set to the URL used by
# `kubectl proxy` to connect to a Kubernetes cluster from localhost.
#
# Optional for in-cluster configuration, required otherwise
# Default: empty # Default: empty
# #
# endpoint = "http://localhost:8080" # endpoint = "http://localhost:8080"
@ -36,8 +29,8 @@ Træfik can be configured to use Kubernetes Ingress as a backend configuration:
# #
# token = "my token" # token = "my token"
# Path to the certificate authority file used for the Kubernetes client # Path to the certificate authority file.
# configuration. # Used for the Kubernetes client configuration.
# #
# Optional # Optional
# Default: empty # Default: empty
@ -52,35 +45,70 @@ Træfik can be configured to use Kubernetes Ingress as a backend configuration:
# namespaces = ["default", "production"] # namespaces = ["default", "production"]
# Ingress label selector to identify Ingress objects that should be processed. # Ingress label selector to identify Ingress objects that should be processed.
# See https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors for details.
# #
# Optional # Optional
# Default: empty (process all Ingresses) # Default: empty (process all Ingresses)
# #
# labelselector = "A and not B" # labelselector = "A and not B"
# Disable PassHost Headers.
#
# Optional
# Default: false
#
# disablePassHostHeaders = true
``` ```
### `endpoint`
The Kubernetes server endpoint.
When deployed as a replication controller in Kubernetes, Traefik will use the environment variables `KUBERNETES_SERVICE_HOST` and `KUBERNETES_SERVICE_PORT` to construct the endpoint.
The secure token is found in `/var/run/secrets/kubernetes.io/serviceaccount/token` and the SSL CA certificate in `/var/run/secrets/kubernetes.io/serviceaccount/ca.crt`.
The endpoint may be given to override the environment variable values.
When the environment variables are not found, Traefik will try to connect to the Kubernetes API server with an external-cluster client.
In this case, the endpoint is required.
Specifically, it may be set to the URL used by `kubectl proxy` to connect to a Kubernetes cluster from localhost.
### `labelselector`
Ingress label selector to identify Ingress objects that should be processed.
See [label-selectors](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors) for details.
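Putting the two together, a minimal out-of-cluster sketch could look as follows; the endpoint and the selector value are illustrative.

```toml
[kubernetes]
# URL exposed by `kubectl proxy` on the local machine.
endpoint = "http://localhost:8080"
# Only process Ingress objects carrying this label.
labelselector = "traefik=enabled"
```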
## Annotations
Annotations can be used on containers to override default behaviour for the whole Ingress resource: Annotations can be used on containers to override default behaviour for the whole Ingress resource:
- `traefik.frontend.rule.type: PathPrefixStrip`: override the default frontend rule type (Default: `PathPrefix`). - `traefik.frontend.rule.type: PathPrefixStrip`
- `traefik.frontend.priority: 3`: override the default frontend rule priority (Default: `len(Path)`). Override the default frontend rule type. Default: `PathPrefix`.
- `traefik.frontend.priority: 3`
Override the default frontend rule priority.
Annotations can be used on the Kubernetes service to override default behaviour: Annotations can be used on the Kubernetes service to override default behaviour:
- `traefik.backend.loadbalancer.method=drr`: override the default `wrr` load balancer algorithm - `traefik.backend.loadbalancer.method=drr`
- `traefik.backend.loadbalancer.sticky=true`: enable backend sticky sessions Override the default `wrr` load balancer algorithm
- `traefik.backend.loadbalancer.sticky=true`
Enable backend sticky sessions
You can find here an example [ingress](https://raw.githubusercontent.com/containous/traefik/master/examples/k8s/cheese-ingress.yaml) and [replication controller](https://raw.githubusercontent.com/containous/traefik/master/examples/k8s/traefik.yaml). You can find here an example [ingress](https://raw.githubusercontent.com/containous/traefik/master/examples/k8s/cheese-ingress.yaml) and [replication controller](https://raw.githubusercontent.com/containous/traefik/master/examples/k8s/traefik.yaml).
Additionally, an annotation can be used on Kubernetes services to set the [circuit breaker expression](https://docs.traefik.io/basics/#backends) for a backend. Additionally, an annotation can be used on Kubernetes services to set the [circuit breaker expression](/basics/#backends) for a backend.
- `traefik.backend.circuitbreaker: <expression>`: set the circuit breaker expression for the backend (Default: nil). - `traefik.backend.circuitbreaker: <expression>`
Set the circuit breaker expression for the backend. Default: `nil`.
As known from nginx when used as Kubernetes Ingress Controller, a List of IP-Ranges which are allowed to access can be configured by using an ingress annotation: As known from nginx when used as Kubernetes Ingress Controller, a list of IP-Ranges which are allowed to access can be configured by using an ingress annotation:
- `ingress.kubernetes.io/whitelist-source-range: "1.2.3.0/24, fe80::/16"` - `ingress.kubernetes.io/whitelist-source-range: "1.2.3.0/24, fe80::/16"`
An unset or empty list allows all Source-IPs to access. If one of the Net-Specifications are invalid, the whole list is invalid and allows all Source-IPs to access. An unset or empty list allows all Source-IPs to access.
If one of the Net-Specifications is invalid, the whole list is invalid and allows all Source-IPs to access.
### Authentication ### Authentication
@ -89,7 +117,8 @@ Is possible to add additional authentication annotations in the Ingress rule.
The source of the authentication is a secret that contains usernames and passwords inside the key `auth`. The source of the authentication is a secret that contains usernames and passwords inside the key `auth`.
- `ingress.kubernetes.io/auth-type`: `basic` - `ingress.kubernetes.io/auth-type`: `basic`
- `ingress.kubernetes.io/auth-secret`: contains the usernames and passwords with access to the paths defined in the Ingress Rule. - `ingress.kubernetes.io/auth-secret`
Contains the usernames and passwords with access to the paths defined in the Ingress Rule.
The secret must be created in the same namespace as the Ingress rule. The secret must be created in the same namespace as the Ingress rule.
@ -1,26 +1,33 @@
# Marathon Backend # Marathon Backend
Træfik can be configured to use Marathon as a backend configuration: Træfik can be configured to use Marathon as a backend configuration.
See also [Marathon user guide](/user-guide/marathon).
## Configuration
```toml ```toml
################################################################ ################################################################
# Mesos/Marathon configuration backend # Mesos/Marathon configuration backend
################################################################ ################################################################
# Enable Marathon configuration backend # Enable Marathon configuration backend.
[marathon] [marathon]
# Marathon server endpoint. # Marathon server endpoint.
# You can also specify multiple endpoints for Marathon: # You can also specify multiple endpoints for Marathon:
# endpoint := "http://10.241.1.71:8080,10.241.1.72:8080,10.241.1.73:8080" # endpoint = "http://10.241.1.71:8080,10.241.1.72:8080,10.241.1.73:8080"
# #
# Required # Required
# Default: "http://127.0.0.1:8080"
# #
endpoint = "http://127.0.0.1:8080" endpoint = "http://127.0.0.1:8080"
# Enable watch Marathon changes # Enable watch Marathon changes.
# #
# Optional # Optional
# Default: true
# #
watch = true watch = true
@ -31,20 +38,21 @@ watch = true
# #
domain = "marathon.localhost" domain = "marathon.localhost"
# Override default configuration template. For advanced users :) # Override default configuration template.
# For advanced users :)
# #
# Optional # Optional
# #
# filename = "marathon.tmpl" # filename = "marathon.tmpl"
# Expose Marathon apps by default in traefik # Expose Marathon apps by default in Traefik.
# #
# Optional # Optional
# Default: true # Default: true
# #
# exposedByDefault = true # exposedByDefault = false
# Convert Marathon groups to subdomains # Convert Marathon groups to subdomains.
# Default behavior: /foo/bar/myapp => foo-bar-myapp.{defaultDomain} # Default behavior: /foo/bar/myapp => foo-bar-myapp.{defaultDomain}
# with groupsAsSubDomains enabled: /foo/bar/myapp => myapp.bar.foo.{defaultDomain} # with groupsAsSubDomains enabled: /foo/bar/myapp => myapp.bar.foo.{defaultDomain}
# #
@ -53,52 +61,54 @@ domain = "marathon.localhost"
# #
# groupsAsSubDomains = true # groupsAsSubDomains = true
# Enable compatibility with marathon-lb labels # Enable compatibility with marathon-lb labels.
# #
# Optional # Optional
# Default: false # Default: false
# #
# marathonLBCompatibility = true # marathonLBCompatibility = true
# Enable Marathon basic authentication # Enable Marathon basic authentication.
# #
# Optional # Optional
# #
# [marathon.basic] # [marathon.basic]
# httpBasicAuthUser = "foo" # httpBasicAuthUser = "foo"
# httpBasicPassword = "bar" # httpBasicPassword = "bar"
# TLS client configuration. https://golang.org/pkg/crypto/tls/#Config # TLS client configuration. https://golang.org/pkg/crypto/tls/#Config
# #
# Optional # Optional
# #
# [marathon.TLS] # [marathon.TLS]
# CA = "/etc/ssl/ca.crt" # CA = "/etc/ssl/ca.crt"
# Cert = "/etc/ssl/marathon.cert" # Cert = "/etc/ssl/marathon.cert"
# Key = "/etc/ssl/marathon.key" # Key = "/etc/ssl/marathon.key"
# InsecureSkipVerify = true # InsecureSkipVerify = true
# DCOSToken for DCOS environment, This will override the Authorization header # DCOSToken for DCOS environment.
# This will override the Authorization header.
# #
# Optional # Optional
# #
# dcosToken = "xxxxxx" # dcosToken = "xxxxxx"
# Override DialerTimeout # Override DialerTimeout.
# Amount of time to allow the Marathon provider to wait to open a TCP connection # Amount of time to allow the Marathon provider to wait to open a TCP connection
# to a Marathon master. # to a Marathon master.
# Can be provided in a format supported by [time.ParseDuration](https://golang.org/pkg/time/#ParseDuration) or as raw # Can be provided in a format supported by [time.ParseDuration](https://golang.org/pkg/time/#ParseDuration) or as raw
# values (digits). If no units are provided, the value is parsed assuming # values (digits).
# seconds. # If no units are provided, the value is parsed assuming seconds.
# #
# Optional # Optional
# Default: "60s" # Default: "60s"
#
# dialerTimeout = "60s" # dialerTimeout = "60s"
# Set the TCP Keep Alive interval for the Marathon HTTP Client. # Set the TCP Keep Alive interval for the Marathon HTTP Client.
# Can be provided in a format supported by [time.ParseDuration](https://golang.org/pkg/time/#ParseDuration) or as raw # Can be provided in a format supported by [time.ParseDuration](https://golang.org/pkg/time/#ParseDuration) or as raw
# values (digits). If no units are provided, the value is parsed assuming # values (digits).
# seconds. # If no units are provided, the value is parsed assuming seconds.
# #
# Optional # Optional
# Default: "10s" # Default: "10s"
@ -113,21 +123,28 @@ domain = "marathon.localhost"
# Optional # Optional
# Default: false # Default: false
# #
# forceTaskHostname = false # forceTaskHostname = true
# Applications may define readiness checks which are probed by Marathon during # Applications may define readiness checks which are probed by Marathon during
# deployments periodically and the results exposed via the API. Enabling the # deployments periodically and the results exposed via the API.
# following parameter causes Traefik to filter out tasks whose readiness checks # Enabling the following parameter causes Traefik to filter out tasks
# have not succeeded. # whose readiness checks have not succeeded.
# Note that the checks are only valid at deployment times. See the Marathon # Note that the checks are only valid at deployment times.
# guide for details. # See the Marathon guide for details.
# #
# Optional # Optional
# Default: false # Default: false
# #
# respectReadinessChecks = false # respectReadinessChecks = true
``` ```
To enable constraints see [backend-specific constraints section](/configuration/commons/#backend-specific).
## Labels: overriding default behaviour
### On Containers
Labels can be used on containers to override default behaviour: Labels can be used on containers to override default behaviour:
| Label | Description | | Label | Description |
@ -151,6 +168,8 @@ Labels can be used on containers to override default behaviour:
| `traefik.frontend.entryPoints=http,https` | assign this frontend to entry points `http` and `https`. Overrides `defaultEntryPoints`. | | `traefik.frontend.entryPoints=http,https` | assign this frontend to entry points `http` and `https`. Overrides `defaultEntryPoints`. |
| `traefik.frontend.auth.basic=EXPR` | Sets basic authentication for that frontend in CSV format: `User:Hash,User:Hash`. | | `traefik.frontend.auth.basic=EXPR` | Sets basic authentication for that frontend in CSV format: `User:Hash,User:Hash`. |
### On Services
If several ports need to be exposed from a container, the services labels can be used: If several ports need to be exposed from a container, the services labels can be used:
| Label | Description | | Label | Description |
@ -1,13 +1,13 @@
# Mesos Generic Backend # Mesos Generic Backend
Træfik can be configured to use Mesos as a backend configuration: Træfik can be configured to use Mesos as a backend configuration.
```toml ```toml
################################################################ ################################################################
# Mesos configuration backend # Mesos configuration backend
################################################################ ################################################################
# Enable Mesos configuration backend # Enable Mesos configuration backend.
[mesos] [mesos]
# Mesos server endpoint. # Mesos server endpoint.
@ -16,12 +16,14 @@ Træfik can be configured to use Mesos as a backend configuration:
# endpoint = "zk://192.168.35.20:2181,192.168.35.21:2181,192.168.35.22:2181/mesos" # endpoint = "zk://192.168.35.20:2181,192.168.35.21:2181,192.168.35.22:2181/mesos"
# #
# Required # Required
# Default: "http://127.0.0.1:5050"
# #
endpoint = "http://127.0.0.1:8080" endpoint = "http://127.0.0.1:8080"
# Enable watch Mesos changes # Enable watch Mesos changes.
# #
# Optional # Optional
# Default: true
# #
watch = true watch = true
@ -32,18 +34,19 @@ watch = true
# #
domain = "mesos.localhost" domain = "mesos.localhost"
# Override default configuration template. For advanced users :) # Override default configuration template.
# For advanced users :)
# #
# Optional # Optional
# #
# filename = "mesos.tmpl" # filename = "mesos.tmpl"
# Expose Mesos apps by default in traefik # Expose Mesos apps by default in Traefik.
# #
# Optional # Optional
# Default: false # Default: true
# #
# ExposedByDefault = true # ExposedByDefault = false
# TLS client configuration. https://golang.org/pkg/crypto/tls/#Config # TLS client configuration. https://golang.org/pkg/crypto/tls/#Config
# #
@ -52,30 +55,39 @@ domain = "mesos.localhost"
# [mesos.TLS] # [mesos.TLS]
# InsecureSkipVerify = true # InsecureSkipVerify = true
# Zookeeper timeout (in seconds) # Zookeeper timeout (in seconds).
# #
# Optional # Optional
# Default: 30 # Default: 30
# #
# ZkDetectionTimeout = 30 # ZkDetectionTimeout = 30
# Polling interval (in seconds) # Polling interval (in seconds).
# #
# Optional # Optional
# Default: 30 # Default: 30
# #
# RefreshSeconds = 30 # RefreshSeconds = 30
# IP sources (e.g. host, docker, mesos, rkt) # IP sources (e.g. host, docker, mesos, rkt).
# #
# Optional # Optional
# #
# IPSources = "host" # IPSources = "host"
# HTTP Timeout (in seconds) # HTTP Timeout (in seconds).
# #
# Optional # Optional
# Default: 30 # Default: 30
# #
# StateTimeoutSecond = "30" # StateTimeoutSecond = "30"
# Convert groups to subdomains.
# Default behavior: /foo/bar/myapp => foo-bar-myapp.{defaultDomain}
# with groupsAsSubDomains enabled: /foo/bar/myapp => myapp.bar.foo.{defaultDomain}
#
# Optional
# Default: false
#
# groupsAsSubDomains = true
``` ```
@ -1,13 +1,15 @@
# Rancher Backend # Rancher Backend
Træfik can be configured to use Rancher as a backend configuration: Træfik can be configured to use Rancher as a backend configuration.
## Global Configuration
```toml ```toml
################################################################ ################################################################
# Rancher configuration backend # Rancher configuration backend
################################################################ ################################################################
# Enable Rancher configuration backend # Enable Rancher configuration backend.
[rancher] [rancher]
# Default domain used. # Default domain used.
@ -17,27 +19,28 @@ Træfik can be configured to use Rancher as a backend configuration:
# #
domain = "rancher.localhost" domain = "rancher.localhost"
# Enable watch Rancher changes # Enable watch Rancher changes.
# #
# Optional # Optional
# Default: true # Default: true
# #
watch = true watch = true
# Polling interval (in seconds) # Polling interval (in seconds).
# #
# Optional # Optional
# Default: 15
# #
refreshSeconds = 15 refreshSeconds = 15
# Expose Rancher services by default in traefik # Expose Rancher services by default in Traefik.
# #
# Optional # Optional
# Default: true # Default: true
# #
exposedByDefault = false exposedByDefault = false
# Filter services with unhealthy states and inactive states # Filter services with unhealthy states and inactive states.
# #
# Optional # Optional
# Default: false # Default: false
@ -45,18 +48,20 @@ exposedByDefault = false
enableServiceHealthFilter = true enableServiceHealthFilter = true
``` ```
To enable constraints see [backend-specific constraints section](/configuration/commons/#backend-specific).
## Rancher Metadata Service ## Rancher Metadata Service
```toml ```toml
# Enable Rancher metadata service configuration backend instead of the API # Enable Rancher metadata service configuration backend instead of the API
# configuration backend # configuration backend.
# #
# Optional # Optional
# Default: false # Default: false
# #
[rancher.metadata] [rancher.metadata]
# Poll the Rancher metadata service for changes every `rancher.RefreshSeconds` # Poll the Rancher metadata service for changes every `rancher.RefreshSeconds`.
# NOTE: this is less accurate than the default long polling technique which # NOTE: this is less accurate than the default long polling technique which
# will provide near instantaneous updates to Traefik # will provide near instantaneous updates to Traefik
# #
@ -65,7 +70,7 @@ enableServiceHealthFilter = true
# #
intervalPoll = true intervalPoll = true
# Prefix used for accessing the Rancher metadata service # Prefix used for accessing the Rancher metadata service.
# #
# Optional # Optional
# Default: "/latest" # Default: "/latest"
@ -76,24 +81,24 @@ prefix = "/2016-07-29"
## Rancher API ## Rancher API
```toml ```toml
# Enable Rancher API configuration backend # Enable Rancher API configuration backend.
# #
# Optional # Optional
# Default: true # Default: true
# #
[rancher.api] [rancher.api]
# Endpoint to use when connecting to the Rancher API # Endpoint to use when connecting to the Rancher API.
# #
# Required # Required
endpoint = "http://rancherserver.example.com/v1" endpoint = "http://rancherserver.example.com/v1"
# AccessKey to use when connecting to the Rancher API # AccessKey to use when connecting to the Rancher API.
# #
# Required # Required
accessKey = "XXXXXXXXXXXXXXXXXXXX" accessKey = "XXXXXXXXXXXXXXXXXXXX"
# SecretKey to use when connecting to the Rancher API # SecretKey to use when connecting to the Rancher API.
# #
# Required # Required
secretKey = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" secretKey = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
@ -102,10 +107,10 @@ secretKey = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
!!! note !!! note
If Traefik needs access to the Rancher API, you need to set the `endpoint`, `accesskey` and `secretkey` parameters. If Traefik needs access to the Rancher API, you need to set the `endpoint`, `accesskey` and `secretkey` parameters.
To enable traefik to fetch information about the Environment it's deployed in only, you need to create an `Environment API Key`. To enable Traefik to fetch information only about the Environment it's deployed in, you need to create an `Environment API Key`.
This can be found within the API Key advanced options. This can be found within the API Key advanced options.
## Labels ## Labels: overriding default behaviour
Labels can be used on task containers to override default behaviour: Labels can be used on task containers to override default behaviour:
@ -3,35 +3,33 @@
Træfik can be configured: Træfik can be configured:
- using a RESTful api. - using a RESTful api.
- to use a metric system (like Prometheus, DataDog or StatD, ...). - to use a monitoring system (like Prometheus, DataDog or StatsD, ...).
- to expose a Web Dashboard. - to expose a Web Dashboard.
## Configuration ## Configuration
```toml ```toml
# Enable web backend.
[web] [web]
# Web administration port # Web administration port.
# #
# Required # Required
# Default: ":8080"
# #
address = ":8080" address = ":8080"
# SSL certificate and key used # SSL certificate and key used.
# #
# Optional # Optional
# #
# CertFile = "traefik.crt" # certFile = "traefik.crt"
# KeyFile = "traefik.key" # keyFile = "traefik.key"
# Set REST API to read-only mode # Set REST API to read-only mode.
# #
# Optional # Optional
# ReadOnly = false # readOnly = false
# Enable more detailed statistics
# [web.statistics]
# RecentErrors = 10
``` ```
## Web UI ## Web UI
@ -42,7 +40,10 @@ address = ":8080"
### Authentication ### Authentication
- Basic Authentication !!! note
The `/ping` path of the api is excluded from authentication (since 1.4).
#### Basic Authentication
Passwords can be encoded in MD5, SHA1 and BCrypt: you can use `htpasswd` to generate them. Passwords can be encoded in MD5, SHA1 and BCrypt: you can use `htpasswd` to generate them.
@ -50,13 +51,18 @@ Users can be specified directly in the toml file, or indirectly by referencing a
if both are provided, the two are merged, with external file contents having precedence. if both are provided, the two are merged, with external file contents having precedence.
```toml ```toml
[web]
# ...
# To enable basic auth on the webui with 2 user/pass: test:test and test2:test2 # To enable basic auth on the webui with 2 user/pass: test:test and test2:test2
[web.auth.basic] [web.auth.basic]
users = ["test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/", "test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"] users = ["test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/", "test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"]
usersFile = "/path/to/.htpasswd" usersFile = "/path/to/.htpasswd"
# ...
``` ```
- Digest Authentication #### Digest Authentication
You can use `htdigest` to generate them. You can use `htdigest` to generate them.
@ -64,10 +70,15 @@ Users can be specified directly in the toml file, or indirectly by referencing a
if both are provided, the two are merged, with external file contents having precedence if both are provided, the two are merged, with external file contents having precedence
```toml ```toml
[web]
# ...
# To enable digest auth on the webui with 2 user/realm/pass: test:traefik:test and test2:traefik:test2 # To enable digest auth on the webui with 2 user/realm/pass: test:traefik:test and test2:traefik:test2
[web.auth.digest] [web.auth.digest]
users = ["test:traefik:a2688e031edb4be6a3797f3882655c05 ", "test2:traefik:518845800f9e2bfb1f1f740ec24f074e"] users = ["test:traefik:a2688e031edb4be6a3797f3882655c05 ", "test2:traefik:518845800f9e2bfb1f1f740ec24f074e"]
usersFile = "/path/to/.htdigest" usersFile = "/path/to/.htdigest"
# ...
``` ```
@ -75,32 +86,96 @@ Users can be specified directly in the toml file, or indirectly by referencing a
You can enable Traefik to export internal metrics to different monitoring systems. You can enable Traefik to export internal metrics to different monitoring systems.
- Prometheus ### Prometheus
```toml ```toml
[web]
# ...
# To enable Traefik to export internal metrics to Prometheus # To enable Traefik to export internal metrics to Prometheus
[web.metrics.prometheus] [web.metrics.prometheus]
Buckets=[0.1,0.3,1.2,5.0]
# Buckets for latency metrics
#
# Optional
# Default: [0.1, 0.3, 1.2, 5]
buckets=[0.1,0.3,1.2,5.0]
# ...
``` ```
- DataDog ### DataDog
```toml ```toml
[web]
# ...
# DataDog metrics exporter type # DataDog metrics exporter type
[web.metrics.datadog] [web.metrics.datadog]
Address = "localhost:8125"
Pushinterval = "10s" # DataDog's address.
#
# Required
# Default: "localhost:8125"
#
address = "localhost:8125"
# DataDog push interval
#
# Optional
# Default: "10s"
#
pushinterval = "10s"
# ...
``` ```
- StatsD ### StatsD
```toml ```toml
[web]
# ...
# StatsD metrics exporter type # StatsD metrics exporter type
[web.metrics.statsd] [web.metrics.statsd]
Address = "localhost:8125"
Pushinterval = "10s" # StatsD's address.
#
# Required
# Default: "localhost:8125"
#
address = "localhost:8125"
# StatsD push interval
#
# Optional
# Default: "10s"
#
pushinterval = "10s"
# ...
``` ```
## Statistics
```toml
[web]
# ...
# Enable more detailed statistics.
[web.statistics]
# Number of recent errors logged.
#
# Default: 10
#
recentErrors = 10
# ...
```
## API ## API
| Path | Method | Description | | Path | Method | Description |
@ -126,7 +201,7 @@ You can enable Traefik to export internal metrics to different monitoring system
#### Ping #### Ping
```shell ```shell
$ curl -sv "http://localhost:8080/ping" curl -sv "http://localhost:8080/ping"
``` ```
```shell ```shell
* Trying ::1... * Trying ::1...
@ -148,7 +223,7 @@ OK
#### Health #### Health
```shell ```shell
$ curl -s "http://localhost:8080/health" | jq . curl -s "http://localhost:8080/health" | jq .
``` ```
```json ```json
{ {
@ -209,7 +284,7 @@ $ curl -s "http://localhost:8080/health" | jq .
#### Provider configurations #### Provider configurations
```shell ```shell
$ curl -s "http://localhost:8080/api" | jq . curl -s "http://localhost:8080/api" | jq .
``` ```
```json ```json
{ {
@ -1,38 +1,61 @@
# Zookeeper Backend # Zookeeper Backend
Træfik can be configured to use Zookeeper as a backend configuration: Træfik can be configured to use Zookeeper as a backend configuration.
```toml ```toml
################################################################ ################################################################
# Zookeeper configuration backend # Zookeeper configuration backend
################################################################ ################################################################
# Enable Zookeeper configuration backend # Enable Zookeeper configuration backend.
[zookeeper] [zookeeper]
# Zookeeper server endpoint # Zookeeper server endpoint.
# #
# Required # Required
# Default: "127.0.0.1:2181"
# #
endpoint = "127.0.0.1:2181" endpoint = "127.0.0.1:2181"
# Enable watch Zookeeper changes # Enable watch Zookeeper changes.
# #
# Optional # Optional
# Default: true
# #
watch = true watch = true
# Prefix used for KV store. # Prefix used for KV store.
# #
# Optional # Optional
# Default: "/traefik"
# #
prefix = "traefik" prefix = "/traefik"
# Override default configuration template. For advanced users :) # Override default configuration template.
# For advanced users :)
# #
# Optional # Optional
# #
# filename = "zookeeper.tmpl" # filename = "zookeeper.tmpl"
# Use Zookeeper user/pass authentication.
#
# Optional
#
# username = foo
# password = bar
# Enable Zookeeper TLS connection.
#
# Optional
#
# [zookeeper.tls]
# ca = "/etc/ssl/ca.crt"
# cert = "/etc/ssl/zookeeper.crt"
# key = "/etc/ssl/zookeeper.key"
# insecureskipverify = true
``` ```
Please refer to the [Key Value storage structure](/user-guide/kv-config/#key-value-storage-structure) section to get documentation on traefik KV structure. To enable constraints see [backend-specific constraints section](/configuration/commons/#backend-specific).
Please refer to the [Key Value storage structure](/user-guide/kv-config/#key-value-storage-structure) section to get documentation on Traefik KV structure.
@ -4,45 +4,34 @@
```toml ```toml
# Duration to give active requests a chance to finish before Traefik stops. # Duration to give active requests a chance to finish before Traefik stops.
# Can be provided in a format supported by [time.ParseDuration](https://golang.org/pkg/time/#ParseDuration) or as raw values (digits).
# If no units are provided, the value is parsed assuming seconds.
# Note: in this time frame no new requests are accepted.
# #
# Optional # Optional
# Default: "10s" # Default: "10s"
# #
# graceTimeOut = "10s" # graceTimeOut = "10s"
# Enable debug mode # Enable debug mode.
# #
# Optional # Optional
# Default: false # Default: false
# #
# debug = true # debug = true
# Periodically check if a new version has been released # Periodically check if a new version has been released.
# #
# Optional # Optional
# Default: true # Default: true
# #
# checkNewVersion = false # checkNewVersion = false
# Backends throttle duration: minimum duration in seconds between 2 events from providers # Backends throttle duration.
# before applying a new configuration. It avoids unnecessary reloads if multiples events
# are sent in a short amount of time.
# Can be provided in a format supported by [time.ParseDuration](https://golang.org/pkg/time/#ParseDuration) or as raw
# values (digits). If no units are provided, the value is parsed assuming
# seconds.
# #
# Optional # Optional
# Default: "2s" # Default: "2s"
# #
# ProvidersThrottleDuration = "2s" # ProvidersThrottleDuration = "2s"
# Controls the maximum idle (keep-alive) connections to keep per-host. If zero, DefaultMaxIdleConnsPerHost # Controls the maximum idle (keep-alive) connections to keep per-host.
# from the Go standard library net/http module is used.
# If you encounter 'too many open files' errors, you can either increase this
# value or change the `ulimit`.
# #
# Optional # Optional
# Default: 200 # Default: 200
@ -50,15 +39,15 @@
# MaxIdleConnsPerHost = 200 # MaxIdleConnsPerHost = 200
# If set to true invalid SSL certificates are accepted for backends. # If set to true invalid SSL certificates are accepted for backends.
# Note: This disables detection of man-in-the-middle attacks so should only be used on secure backend networks. # This disables detection of man-in-the-middle attacks so should only be used on secure backend networks.
# #
# Optional # Optional
# Default: false # Default: false
# #
# InsecureSkipVerify = true # InsecureSkipVerify = true
# Register Certificates in the RootCA. This certificates will be use for backends calls. # Register Certificates in the RootCA.
# Note: You can use file path or cert content directly #
# Optional # Optional
# Default: [] # Default: []
# #
@ -73,6 +62,29 @@
# defaultEntryPoints = ["http", "https"] # defaultEntryPoints = ["http", "https"]
``` ```
- `graceTimeOut`: Duration to give active requests a chance to finish before Traefik stops.
Can be provided in a format supported by [time.ParseDuration](https://golang.org/pkg/time/#ParseDuration) or as raw values (digits).
If no units are provided, the value is parsed assuming seconds.
**Note:** in this time frame no new requests are accepted.
- `ProvidersThrottleDuration`: Backends throttle duration: minimum duration in seconds between 2 events from providers before applying a new configuration.
It avoids unnecessary reloads if multiple events are sent in a short amount of time.
Can be provided in a format supported by [time.ParseDuration](https://golang.org/pkg/time/#ParseDuration) or as raw values (digits).
If no units are provided, the value is parsed assuming seconds.
- `MaxIdleConnsPerHost`: Controls the maximum idle (keep-alive) connections to keep per-host.
If zero, `DefaultMaxIdleConnsPerHost` from the Go standard library net/http module is used.
If you encounter 'too many open files' errors, you can either increase this value or change the `ulimit`.
- `InsecureSkipVerify`: If set to true, invalid SSL certificates are accepted for backends.
**Note:** This disables detection of man-in-the-middle attacks so should only be used on secure backend networks.
- `RootCAs`: Register Certificates in the RootCA. These certificates will be used for backend calls.
**Note:** You can use a file path or the certificate content directly.
- `defaultEntryPoints`: Entrypoints to be used by frontends that do not specify any entrypoint.
Each frontend can specify its own entrypoints.
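To tie the settings above together, here is a sketch with explicit values at the top level of the main TOML configuration; all of them are optional and the values shown are purely illustrative.

```toml
graceTimeOut = "30s"
checkNewVersion = false
ProvidersThrottleDuration = "5s"
MaxIdleConnsPerHost = 500
InsecureSkipVerify = false
RootCAs = [ "/etc/ssl/my-internal-ca.crt" ]
defaultEntryPoints = ["http", "https"]
```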
## Constraints ## Constraints
@ -80,18 +92,6 @@ In a micro-service architecture, with a central service discovery, setting const
Træfik filters services according to service attributes/tags set in your configuration backends. Træfik filters services according to service attributes/tags set in your configuration backends.
Supported backends:
- Docker
- Consul K/V
- BoltDB
- Zookeeper
- Etcd
- Consul Catalog
- Rancher
- Marathon
- Kubernetes (using a provider-specific mechanism based on label selectors)
Supported filters: Supported filters:
- `tag` - `tag`
@ -120,14 +120,27 @@ constraints = ["tag!=us-*", "tag!=asia-*"]
### Backend-specific ### Backend-specific
Supported backends:
- Docker
- Consul K/V
- BoltDB
- Zookeeper
- Etcd
- Consul Catalog
- Rancher
- Marathon
- Kubernetes (using a provider-specific mechanism based on label selectors)
```toml ```toml
# Backend-specific constraint # Backend-specific constraint
[consulCatalog] [consulCatalog]
endpoint = "127.0.0.1:8500" # ...
constraints = ["tag==api"] constraints = ["tag==api"]
# Backend-specific constraint
[marathon] [marathon]
endpoint = "127.0.0.1:8800" # ...
constraints = ["tag==api", "tag!=v*-beta"] constraints = ["tag==api", "tag!=v*-beta"]
``` ```
@ -190,14 +203,17 @@ Traefik will close and reopen its log files, assuming they're configured, on rec
This allows the logs to be rotated and processed by an external program, such as `logrotate`. This allows the logs to be rotated and processed by an external program, such as `logrotate`.
!!! note !!! note
that this does not work on Windows due to the lack of USR signals. This does not work on Windows due to the lack of USR signals.
## Custom Error pages ## Custom Error pages
Custom error pages can be returned, in lieu of the default, according to frontend-configured ranges of HTTP Status codes. Custom error pages can be returned, in lieu of the default, according to frontend-configured ranges of HTTP Status codes.
In the example below, if a 503 status is returned from the frontend "website", the custom error page at http://2.3.4.5/503.html is returned with the actual status code set in the HTTP header. In the example below, if a 503 status is returned from the frontend "website", the custom error page at http://2.3.4.5/503.html is returned with the actual status code set in the HTTP header.
Note, the `503.html` page itself is not hosted on traefik, but some other infrastructure.
!!! note
The `503.html` page itself is not hosted on Traefik, but some other infrastructure.
```toml ```toml
[frontends] [frontends]
@ -226,6 +242,9 @@ Instead, the query parameter can also be set to some generic error page like so:
Now the `500s.html` error page is returned for the configured code range. Now the `500s.html` error page is returned for the configured code range.
The configured status code ranges are inclusive; that is, in the above example, the `500s.html` page will be returned for status codes `500` through, and including, `599`. The configured status code ranges are inclusive; that is, in the above example, the `500s.html` page will be returned for status codes `500` through, and including, `599`.
Custom error pages are easiest to implement using the file provider.
For dynamic providers, the corresponding template file needs to be customized accordingly and referenced in the Traefik configuration.
## Retry Configuration ## Retry Configuration
@ -248,12 +267,7 @@ The configured status code ranges are inclusive; that is, in the above example,
# Enable custom health check options. # Enable custom health check options.
[healthcheck] [healthcheck]
# Set the default health check interval. Will only be effective if health check # Set the default health check interval.
# paths are defined. Given provider-specific support, the value may be
# overridden on a per-backend basis.
# Can be provided in a format supported by [time.ParseDuration](https://golang.org/pkg/time/#ParseDuration) or as raw
# values (digits). If no units are provided, the value is parsed assuming
# seconds.
# #
# Optional # Optional
# Default: "30s" # Default: "30s"
@ -261,6 +275,11 @@ The configured status code ranges are inclusive; that is, in the above example,
# interval = "30s" # interval = "30s"
``` ```
- `interval` sets the default health check interval.
Will only be effective if health check paths are defined.
Given provider-specific support, the value may be overridden on a per-backend basis.
Can be provided in a format supported by [time.ParseDuration](https://golang.org/pkg/time/#ParseDuration) or as raw values (digits).
If no units are provided, the value is parsed assuming seconds.
## Timeouts ## Timeouts
@ -272,20 +291,13 @@ The configured status code ranges are inclusive; that is, in the above example,
[respondingTimeouts] [respondingTimeouts]
# readTimeout is the maximum duration for reading the entire request, including the body. # readTimeout is the maximum duration for reading the entire request, including the body.
# If zero, no timeout exists.
# Can be provided in a format supported by [time.ParseDuration](https://golang.org/pkg/time/#ParseDuration) or as raw
# values (digits). If no units are provided, the value is parsed assuming seconds.
# #
# Optional # Optional
# Default: "0s" # Default: "0s"
# #
# readTimeout = "5s" # readTimeout = "5s"
# writeTimeout is the maximum duration before timing out writes of the response. It covers the time from the end of # writeTimeout is the maximum duration before timing out writes of the response.
# the request header read to the end of the response write.
# If zero, no timeout exists.
# Can be provided in a format supported by [time.ParseDuration](https://golang.org/pkg/time/#ParseDuration) or as raw
# values (digits). If no units are provided, the value is parsed assuming seconds.
# #
# Optional # Optional
# Default: "0s" # Default: "0s"
@ -293,9 +305,6 @@ The configured status code ranges are inclusive; that is, in the above example,
# writeTimeout = "5s" # writeTimeout = "5s"
# idleTimeout is the maximum duration an idle (keep-alive) connection will remain idle before closing itself. # idleTimeout is the maximum duration an idle (keep-alive) connection will remain idle before closing itself.
# If zero, no timeout exists.
# Can be provided in a format supported by [time.ParseDuration](https://golang.org/pkg/time/#ParseDuration) or as raw
# values (digits). If no units are provided, the value is parsed assuming seconds.
# #
# Optional # Optional
# Default: "180s" # Default: "180s"
@ -303,6 +312,22 @@ The configured status code ranges are inclusive; that is, in the above example,
# idleTimeout = "360s" # idleTimeout = "360s"
``` ```
- `readTimeout` is the maximum duration for reading the entire request, including the body.
If zero, no timeout exists.
Can be provided in a format supported by [time.ParseDuration](https://golang.org/pkg/time/#ParseDuration) or as raw values (digits).
If no units are provided, the value is parsed assuming seconds.
- `writeTimeout` is the maximum duration before timing out writes of the response.
It covers the time from the end of the request header read to the end of the response write.
If zero, no timeout exists.
Can be provided in a format supported by [time.ParseDuration](https://golang.org/pkg/time/#ParseDuration) or as raw values (digits).
If no units are provided, the value is parsed assuming seconds.
- `idleTimeout` is the maximum duration an idle (keep-alive) connection will remain idle before closing itself.
If zero, no timeout exists.
Can be provided in a format supported by [time.ParseDuration](https://golang.org/pkg/time/#ParseDuration) or as raw values (digits).
If no units are provided, the value is parsed assuming seconds.
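As a concrete example of the three settings just described, a sketch that sets them explicitly (the durations are illustrative; anything left out keeps the defaults shown earlier):

```toml
[respondingTimeouts]
readTimeout = "5s"
writeTimeout = "5s"
idleTimeout = "360s"
```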
### Forwarding Timeouts ### Forwarding Timeouts
`forwardingTimeouts` are timeouts for requests forwarded to the backend servers. `forwardingTimeouts` are timeouts for requests forwarded to the backend servers.
@ -311,9 +336,6 @@ The configured status code ranges are inclusive; that is, in the above example,
[forwardingTimeouts] [forwardingTimeouts]
# dialTimeout is the amount of time to wait until a connection to a backend server can be established. # dialTimeout is the amount of time to wait until a connection to a backend server can be established.
# If zero, no timeout exists.
# Can be provided in a format supported by [time.ParseDuration](https://golang.org/pkg/time/#ParseDuration) or as raw
# values (digits). If no units are provided, the value is parsed assuming seconds.
# #
# Optional # Optional
# Default: "30s" # Default: "30s"
@ -321,9 +343,6 @@ The configured status code ranges are inclusive; that is, in the above example,
# dialTimeout = "30s" # dialTimeout = "30s"
# responseHeaderTimeout is the amount of time to wait for a server's response headers after fully writing the request (including its body, if any). # responseHeaderTimeout is the amount of time to wait for a server's response headers after fully writing the request (including its body, if any).
# If zero, no timeout exists.
# Can be provided in a format supported by [time.ParseDuration](https://golang.org/pkg/time/#ParseDuration) or as raw
# values (digits). If no units are provided, the value is parsed assuming seconds.
# #
# Optional # Optional
# Default: "0s" # Default: "0s"
@ -331,6 +350,17 @@ The configured status code ranges are inclusive; that is, in the above example,
# responseHeaderTimeout = "0s" # responseHeaderTimeout = "0s"
``` ```
- `dialTimeout` is the amount of time to wait until a connection to a backend server can be established.
If zero, no timeout exists.
Can be provided in a format supported by [time.ParseDuration](https://golang.org/pkg/time/#ParseDuration) or as raw values (digits).
If no units are provided, the value is parsed assuming seconds.
- `responseHeaderTimeout` is the amount of time to wait for a server's response headers after fully writing the request (including its body, if any).
If zero, no timeout exists.
Can be provided in a format supported by [time.ParseDuration](https://golang.org/pkg/time/#ParseDuration) or as raw values (digits).
If no units are provided, the value is parsed assuming seconds.
### Idle Timeout (deprecated) ### Idle Timeout (deprecated)
Use [respondingTimeouts](/configuration/commons/#responding-timeouts) instead of `IdleTimeout`. Use [respondingTimeouts](/configuration/commons/#responding-timeouts) instead of `IdleTimeout`.
@ -109,23 +109,55 @@ Users can be specified directly in the toml file, or indirectly by referencing a
usersFile = "/path/to/.htdigest" usersFile = "/path/to/.htdigest"
``` ```
### Forward Authentication
This configuration will first forward the request to `http://authserver.com/auth`.
If the response code is 2XX, access is granted and the original request is performed.
Otherwise, the response from the auth server is returned.
```toml
[entryPoints]
[entrypoints.http]
# ...
# To enable forward auth on an entrypoint
[entrypoints.http.auth.forward]
address = "https://authserver.com/auth"
# Trust existing X-Forwarded-* headers.
# Useful with another reverse proxy in front of Traefik.
#
# Optional
# Default: false
#
trustForwardHeader = true
# Enable forward auth TLS connection.
#
# Optional
#
[entrypoints.http.auth.forward.tls]
cert = "authserver.crt"
key = "authserver.key"
```
## Specify Minimum TLS Version ## Specify Minimum TLS Version
To specify an https entrypoint with a minimum TLS version, and specifying an array of cipher suites (from crypto/tls). To specify an HTTPS entry point with a minimum TLS version and an array of cipher suites (from `crypto/tls`):
```toml ```toml
[entryPoints] [entryPoints]
[entryPoints.https] [entryPoints.https]
address = ":443" address = ":443"
[entryPoints.https.tls] [entryPoints.https.tls]
MinVersion = "VersionTLS12" minVersion = "VersionTLS12"
CipherSuites = ["TLS_RSA_WITH_AES_256_GCM_SHA384"] cipherSuites = ["TLS_RSA_WITH_AES_256_GCM_SHA384"]
[[entryPoints.https.tls.certificates]] [[entryPoints.https.tls.certificates]]
CertFile = "integration/fixtures/https/snitest.com.cert" certFile = "integration/fixtures/https/snitest.com.cert"
KeyFile = "integration/fixtures/https/snitest.com.key" keyFile = "integration/fixtures/https/snitest.com.key"
[[entryPoints.https.tls.certificates]] [[entryPoints.https.tls.certificates]]
CertFile = "integration/fixtures/https/snitest.org.cert" certFile = "integration/fixtures/https/snitest.org.cert"
KeyFile = "integration/fixtures/https/snitest.org.key" keyFile = "integration/fixtures/https/snitest.org.key"
``` ```
## Compression ## Compression

docs/img/grpc.svg (new image file, 186 KiB; binary diff not shown)

@ -4,7 +4,7 @@
[![Build Status SemaphoreCI](https://semaphoreci.com/api/v1/containous/traefik/branches/master/shields_badge.svg)](https://semaphoreci.com/containous/traefik) [![Build Status SemaphoreCI](https://semaphoreci.com/api/v1/containous/traefik/branches/master/shields_badge.svg)](https://semaphoreci.com/containous/traefik)
[![Docs](https://img.shields.io/badge/docs-current-brightgreen.svg)](https://docs.traefik.io) [![Docs](https://img.shields.io/badge/docs-current-brightgreen.svg)](https://docs.traefik.io)
[![Go Report Card](https://goreportcard.com/badge/kubernetes/helm)](https://goreportcard.com/report/containous/traefik) [![Go Report Card](https://goreportcard.com/badge/github.com/containous/traefik)](https://goreportcard.com/report/github.com/containous/traefik)
[![License](https://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/containous/traefik/blob/master/LICENSE.md) [![License](https://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/containous/traefik/blob/master/LICENSE.md)
[![Join the chat at https://traefik.herokuapp.com](https://img.shields.io/badge/style-register-green.svg?style=social&label=Slack)](https://traefik.herokuapp.com) [![Join the chat at https://traefik.herokuapp.com](https://img.shields.io/badge/style-register-green.svg?style=social&label=Slack)](https://traefik.herokuapp.com)
[![Twitter](https://img.shields.io/twitter/follow/traefikproxy.svg?style=social)](https://twitter.com/intent/follow?screen_name=traefikproxy) [![Twitter](https://img.shields.io/twitter/follow/traefikproxy.svg?style=social)](https://twitter.com/intent/follow?screen_name=traefikproxy)
@ -37,9 +37,9 @@ Run it and forget it!
## Features ## Features
- [It's fast](https://docs.traefik.io/benchmarks) - [It's fast](/benchmarks)
- No dependency hell, single binary made with go - No dependency hell, single binary made with go
- [Tiny](https://microbadger.com/images/traefik) [official](https://hub.docker.com/r/_/traefik/) official docker image - [Tiny](https://microbadger.com/images/traefik) [official](https://hub.docker.com/r/_/traefik/) docker image
- Rest API - Rest API
- Hot-reloading of configuration. No need to restart the process - Hot-reloading of configuration. No need to restart the process
- Circuit breakers, retry - Circuit breakers, retry


@ -9,7 +9,12 @@
.md-typeset__table td code { .md-typeset__table td code {
word-break: unset; word-break: unset;
} }
.md-typeset__table tr :nth-child(1) { .md-typeset__table tr :nth-child(1) {
word-wrap: break-word; word-wrap: break-word;
max-width: 30em; max-width: 30em;
} }
p {
text-align: justify;
}


@ -1,21 +1,25 @@
# Clustering / High Availability (beta) # Clustering / High Availability (beta)
This guide explains how to use Træfik in high availability mode. This guide explains how to use Træfik in high availability mode.
In order to deploy and configure multiple Træfik instances, without copying the same configuration file on each instance, we will use a distributed Key-Value store. In order to deploy and configure multiple Træfik instances, without copying the same configuration file on each instance, we will use a distributed Key-Value store.
## Prerequisites ## Prerequisites
You will need a working KV store cluster. You will need a working KV store cluster.
_(Currently, we recommend [Consul](https://consul.io).)_
## File configuration to KV store migration ## File configuration to KV store migration
We created a special Træfik command to help you configure your Key-Value store from a Træfik TOML configuration file.
Please refer to [this section](/user-guide/kv-config/#store-configuration-in-key-value-store) to get more details. Please refer to [this section](/user-guide/kv-config/#store-configuration-in-key-value-store) to get more details.
## Deploy a Træfik cluster ## Deploy a Træfik cluster
Once your Træfik configuration is uploaded on your KV store, you can start each Træfik instance. Once your Træfik configuration is uploaded on your KV store, you can start each Træfik instance.
A Træfik cluster is based on a manager/worker model. A Træfik cluster is based on a manager/worker model.
When starting, Træfik will elect a manager. When starting, Træfik will elect a manager.
If this instance fails, another manager will be automatically elected. If this instance fails, another manager will be automatically elected.


@ -1,19 +1,24 @@
# Docker & Traefik # Docker & Traefik
In this use case, we want to use Traefik as a _layer-7_ load balancer with SSL termination for a set of micro-services used to run a web application. In this use case, we want to use Traefik as a _layer-7_ load balancer with SSL termination for a set of micro-services used to run a web application.
We also want to automatically _discover any services_ on the Docker host and let Traefik reconfigure itself automatically when containers get created (or shut down) so HTTP traffic can be routed accordingly. We also want to automatically _discover any services_ on the Docker host and let Traefik reconfigure itself automatically when containers get created (or shut down) so HTTP traffic can be routed accordingly.
In addition, we want to use Let's Encrypt to automatically generate and renew SSL certificates per hostname. In addition, we want to use Let's Encrypt to automatically generate and renew SSL certificates per hostname.
## Setting Up ## Setting Up
In order for this to work, you'll need a server with a public IP address, with Docker installed on it. In order for this to work, you'll need a server with a public IP address, with Docker installed on it.
In this example, we're using the fictitious domain _my-awesome-app.org_. In this example, we're using the fictitious domain _my-awesome-app.org_.
In real-life, you'll want to use your own domain and have the DNS configured accordingly so the hostname records you'll want to use point to the aforementioned public IP address. In real-life, you'll want to use your own domain and have the DNS configured accordingly so the hostname records you'll want to use point to the aforementioned public IP address.
## Networking ## Networking
Docker containers can only communicate with each other over TCP when they share at least one network. Docker containers can only communicate with each other over TCP when they share at least one network.
This makes sense from a topological point of view in the context of networking, since Docker under the hood creates IPTable rules so containers can't reach other containers _unless you'd want to_. This makes sense from a topological point of view in the context of networking, since Docker under the hood creates IPTable rules so containers can't reach other containers _unless you'd want to_.
In this example, we're going to use a single network called `web` where all containers that are handling HTTP traffic (including Traefik) will reside in. In this example, we're going to use a single network called `web` where all containers that are handling HTTP traffic (including Traefik) will reside in.
On the Docker host, run the following command: On the Docker host, run the following command:
@ -37,6 +42,7 @@ touch /opt/traefik/traefik.toml
``` ```
The `docker-compose.yml` file will provide us with a simple, consistent and more importantly, a deterministic way to create Traefik. The `docker-compose.yml` file will provide us with a simple, consistent and more importantly, a deterministic way to create Traefik.
The contents of the file is as follows: The contents of the file is as follows:
```yaml ```yaml
@ -62,10 +68,10 @@ networks:
external: true external: true
``` ```
As you can see, we're mounting the `traefik.toml` file as well as the (empty) `acme.json` file in the container. As you can see, we're mounting the `traefik.toml` file as well as the (empty) `acme.json` file in the container.
We're also mounting the `/var/run/docker.sock` Docker socket in the container, so Traefik can listen to Docker events and reconfigure its own internal configuration when containers are created (or shut down).
Also, we're making sure the container is automatically restarted by the Docker engine in case of problems (or: if the server is rebooted). Also, we're making sure the container is automatically restarted by the Docker engine in case of problems (or: if the server is rebooted).
We're publishing the default HTTP ports `80` and `443` on the host, and making sure the container is placed within the `web` network we've created earlier on. We're publishing the default HTTP ports `80` and `443` on the host, and making sure the container is placed within the `web` network we've created earlier on.
Finally, we're giving this container a static name called `traefik`. Finally, we're giving this container a static name called `traefik`.
Let's take a look at a simple `traefik.toml` configuration as well before we create the Traefik container:
@ -106,7 +112,8 @@ This is the minimum configuration required to do the following:
- Check for new versions of Traefik periodically - Check for new versions of Traefik periodically
- Create two entry points, namely an `HTTP` endpoint on port `80`, and an `HTTPS` endpoint on port `443` where all incoming traffic on port `80` will immediately get redirected to `HTTPS`. - Create two entry points, namely an `HTTP` endpoint on port `80`, and an `HTTPS` endpoint on port `443` where all incoming traffic on port `80` will immediately get redirected to `HTTPS`.
- Enable the Docker configuration backend and listen for container events on the Docker unix socket we've mounted earlier. However, **new containers will not be exposed by Traefik by default, we'll get into this in a bit!** - Enable the Docker configuration backend and listen for container events on the Docker unix socket we've mounted earlier. However, **new containers will not be exposed by Traefik by default, we'll get into this in a bit!**
- Enable automatic request and configuration of SSL certificates using Let's Encrypt. These certificates will be stored in the `acme.json` file, which you can back-up yourself and store off-premises. - Enable automatic request and configuration of SSL certificates using Let's Encrypt.
These certificates will be stored in the `acme.json` file, which you can back-up yourself and store off-premises.
Alright, let's boot the container. From the `/opt/traefik` directory, run `docker-compose up -d` which will create and start the Traefik container. Alright, let's boot the container. From the `/opt/traefik` directory, run `docker-compose up -d` which will create and start the Traefik container.
@ -114,7 +121,9 @@ Alright, let's boot the container. From the `/opt/traefik` directory, run `docke
Now that we've fully configured and started Traefik, it's time to get our applications running! Now that we've fully configured and started Traefik, it's time to get our applications running!
Let's take a simple example of a micro-service project consisting of various services, where some will be exposed to the outside world and some will not. The `docker-compose.yml` of our project looks like this: Let's take a simple example of a micro-service project consisting of various services, where some will be exposed to the outside world and some will not.
The `docker-compose.yml` of our project looks like this:
```yaml ```yaml
version: "2.1" version: "2.1"
@ -173,16 +182,19 @@ networks:
external: true external: true
``` ```
Here, we can see a set of services with two applications that we're actually exposing to the outside world. Here, we can see a set of services with two applications that we're actually exposing to the outside world.
Notice how there isn't a single container that has any published ports to the host -- everything is routed through Docker networks. Notice how there isn't a single container that has any published ports to the host -- everything is routed through Docker networks.
Also, only the containers that we want traffic to get routed to are attached to the `web` network we created at the start of this document. Also, only the containers that we want traffic to get routed to are attached to the `web` network we created at the start of this document.
Since the `traefik` container we've created and started earlier is also attached to this network, HTTP requests can now get routed to these containers. Since the `traefik` container we've created and started earlier is also attached to this network, HTTP requests can now get routed to these containers.
### Labels ### Labels
As mentioned earlier, we don't want containers exposed automatically by Traefik. As mentioned earlier, we don't want containers exposed automatically by Traefik.
The reason behind this is simple: we want to have control over this process ourselves. The reason behind this is simple: we want to have control over this process ourselves.
Thanks to Docker labels, we can tell Traefik how to create its internal routing configuration.
Let's take a look at the labels themselves for the `app` service, an HTTP web service listening on port 9000:
```yaml ```yaml
@ -194,14 +206,17 @@ Let's take a look at the labels themselves for the `app` service, which is a HTT
``` ```
First, we specify the `backend` name which corresponds to the actual service we're routing **to**. First, we specify the `backend` name which corresponds to the actual service we're routing **to**.
We also tell Traefik to use the `web` network to route HTTP traffic to this container. With the `frontend.rule` label, we tell Traefik that we want to route to this container if the incoming HTTP request contains the `Host` `app.my-awesome-app.org`.
Essentially, this is the actual rule used for Layer-7 load balancing. We also tell Traefik to use the `web` network to route HTTP traffic to this container.
With the `frontend.rule` label, we tell Traefik that we want to route to this container if the incoming HTTP request contains the `Host` `app.my-awesome-app.org`.
Essentially, this is the actual rule used for Layer-7 load balancing.
With the `traefik.enable` label, we tell Traefik to include this container in its internal configuration.
Finally, we tell Traefik to route **to** port `9000`, since that is the TCP/IP port the container actually listens on.
#### Gotchas and tips #### Gotchas and tips
- Always specify the correct port where the container expects HTTP traffic using `traefik.port` label. - Always specify the correct port where the container expects HTTP traffic using `traefik.port` label.
If a container exposes multiple ports, Traefik may forward traffic to the wrong port. If a container exposes multiple ports, Traefik may forward traffic to the wrong port.
Even if a container only exposes one port, you should always write configuration defensively and explicitly. Even if a container only exposes one port, you should always write configuration defensively and explicitly.
- Should you choose to enable the `exposedbydefault` flag in the `traefik.toml` configuration, be aware that all containers that are placed in the same network as Traefik will automatically be reachable from the outside world, for anyone and everyone to see (a sketch of keeping this flag disabled follows below).
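A minimal sketch of the `[docker]` section this guide assumes (the values mirror the earlier `traefik.toml` discussion; adjust them to your setup):

```toml
[docker]
endpoint = "unix:///var/run/docker.sock"
domain = "my-awesome-app.org"
watch = true
# Containers are only picked up when they explicitly set traefik.enable=true.
exposedbydefault = false
```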
@ -213,5 +228,6 @@ Finally but not unimportantly, we tell Traefik to route **to** port `9000`, sinc
### Final thoughts ### Final thoughts
Using Traefik as a Layer-7 load balancer in combination with both Docker and Let's Encrypt provides you with an extremely flexible, performant and self-configuring solution for your projects. Using Traefik as a Layer-7 load balancer in combination with both Docker and Let's Encrypt provides you with an extremely flexible, powerful and self-configuring solution for your projects.
With Let's Encrypt, your endpoints are automatically secured with production-ready SSL certificates that are renewed automatically as well. With Let's Encrypt, your endpoints are automatically secured with production-ready SSL certificates that are renewed automatically as well.


@ -22,11 +22,11 @@ defaultEntryPoints = ["http", "https"]
address = ":443" address = ":443"
[entryPoints.https.tls] [entryPoints.https.tls]
[[entryPoints.https.tls.certificates]] [[entryPoints.https.tls.certificates]]
CertFile = "integration/fixtures/https/snitest.com.cert" certFile = "integration/fixtures/https/snitest.com.cert"
KeyFile = "integration/fixtures/https/snitest.com.key" keyFile = "integration/fixtures/https/snitest.com.key"
[[entryPoints.https.tls.certificates]] [[entryPoints.https.tls.certificates]]
CertFile = "integration/fixtures/https/snitest.org.cert" certFile = "integration/fixtures/https/snitest.org.cert"
KeyFile = "integration/fixtures/https/snitest.org.key" keyFile = "integration/fixtures/https/snitest.org.key"
``` ```
Note that we can either give the path to a certificate file or the file content itself directly ([like in this TOML example](/user-guide/kv-config/#upload-the-configuration-in-the-key-value-store)).
@ -43,8 +43,8 @@ defaultEntryPoints = ["http", "https"]
address = ":443" address = ":443"
[entryPoints.https.tls] [entryPoints.https.tls]
[[entryPoints.https.tls.certificates]] [[entryPoints.https.tls.certificates]]
CertFile = "examples/traefik.crt" certFile = "examples/traefik.crt"
KeyFile = "examples/traefik.key" keyFile = "examples/traefik.key"
``` ```
## Let's Encrypt support ## Let's Encrypt support
@ -76,6 +76,7 @@ entryPoint = "https"
``` ```
This configuration allows generating Let's Encrypt certificates for the four domains `local[1-4].com` with described SANs. This configuration allows generating Let's Encrypt certificates for the four domains `local[1-4].com` with described SANs.
Traefik generates these certificates when it starts, and it needs to be restarted if new domains are added.
### OnHostRule option ### OnHostRule option
@ -106,6 +107,7 @@ entryPoint = "https"
``` ```
This configuration allows generating Let's Encrypt certificates for the four domains `local[1-4].com`. This configuration allows generating Let's Encrypt certificates for the four domains `local[1-4].com`.
Traefik generates these certificates when it starts. Traefik generates these certificates when it starts.
If a backend is added with a `onHost` rule, Traefik will automatically generate the Let's Encrypt certificate for the new domain. If a backend is added with a `onHost` rule, Traefik will automatically generate the Let's Encrypt certificate for the new domain.
@ -121,10 +123,9 @@ If a backend is added with a `onHost` rule, Traefik will automatically generate
[acme] [acme]
email = "test@traefik.io" email = "test@traefik.io"
storage = "acme.json" storage = "acme.json"
OnDemand = true onDemand = true
caServer = "http://172.18.0.1:4000/directory" caServer = "http://172.18.0.1:4000/directory"
entryPoint = "https" entryPoint = "https"
``` ```
This configuration allows generating a Let's Encrypt certificate during the first HTTPS request on a new domain. This configuration allows generating a Let's Encrypt certificate during the first HTTPS request on a new domain.
@ -166,8 +167,10 @@ entryPoint = "https"
main = "local4.com" main = "local4.com"
``` ```
The DNS challenge needs environment variables to be executed.
These variables have to be set on the machine/container which hosts Traefik.
These variables are described [in this section](/configuration/acme/#dnsprovider).
### OnHostRule option and provided certificates ### OnHostRule option and provided certificates
@ -177,8 +180,8 @@ These variables has described [in this section](toml/#acme-lets-encrypt-configur
address = ":443" address = ":443"
[entryPoints.https.tls] [entryPoints.https.tls]
[[entryPoints.https.tls.certificates]] [[entryPoints.https.tls.certificates]]
CertFile = "examples/traefik.crt" certFile = "examples/traefik.crt"
KeyFile = "examples/traefik.key" keyFile = "examples/traefik.key"
[acme] [acme]
email = "test@traefik.io" email = "test@traefik.io"
@ -226,7 +229,6 @@ entryPoint = "https"
endpoint = "127.0.0.1:8500" endpoint = "127.0.0.1:8500"
watch = true watch = true
prefix = "traefik" prefix = "traefik"
``` ```
This configuration allows using the key `traefik/acme/account` to get/set the Let's Encrypt certificates content.
@ -277,7 +279,7 @@ defaultEntryPoints = ["http"]
## Pass Authenticated user to application via headers ## Pass Authenticated user to application via headers
Providing an authentication method as described above, it is possible to pass the user to the application Providing an authentication method as described above, it is possible to pass the user to the application
via a configurable header value via a configurable header value.
```toml ```toml
defaultEntryPoints = ["http"] defaultEntryPoints = ["http"]
@ -293,6 +295,91 @@ defaultEntryPoints = ["http"]
## Override the Traefik HTTP server IdleTimeout and/or throttle configurations from re-loading too quickly ## Override the Traefik HTTP server IdleTimeout and/or throttle configurations from re-loading too quickly
```toml ```toml
IdleTimeout = "360s" providersThrottleDuration = "5s"
ProvidersThrottleDuration = "5s"
[respondingTimeouts]
idleTimeout = "360s"
``` ```
## Securing Ping Health Check
The `/ping` health-check URL is enabled together with the web admin panel, which is activated with the command-line `--web` flag or the `[web]` config file option.
Thus, if you have a regular path for `/foo` and an entrypoint on `:80`, you would access them as follows:
* Regular path: `http://hostname:80/foo`
* Admin panel: `http://hostname:8080/`
* Ping URL: `http://hostname:8080/ping`
However, for security reasons, you may want to be able to expose the `/ping` health-check URL to outside health-checkers, e.g. an Internet service or cloud load-balancer, _without_ exposing your admin panel's port.
In many environments, the security staff may not _allow_ you to expose it.
You have two options:
* Enable `/ping` on a regular entrypoint
* Enable `/ping` on a dedicated port
### Enable ping health check on a regular entrypoint
To proxy `/ping` from a regular entrypoint to the admin one without exposing the panel, do the following:
```toml
[backends]
[backends.traefik]
[backends.traefik.servers.server1]
url = "http://localhost:8080"
weight = 10
[frontends]
[frontends.traefikadmin]
backend = "traefik"
[frontends.traefikadmin.routes.ping]
rule = "Path:/ping"
```
The above creates a new backend called `traefik`, listening on `http://localhost:8080`, i.e. the local admin port.
We expose that backend through the `frontend` named `traefikadmin`, and only for the `/ping` path.
Be careful with the `traefikadmin` frontend. If you do _not_ specify a `Path:` rule, you would expose the entire dashboard.
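With this in place, an external health-checker only needs the regular entrypoint (a sketch using the example hostname from above; the handler is expected to answer `200 OK`):

```bash
curl -i http://hostname:80/ping
```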
### Enable ping health check on dedicated port
If you do not want to or cannot expose the health-check on a regular entrypoint - e.g. your security rules do not allow it, or you have a conflicting path - then you can enable health-check on its own entrypoint.
Use the following config:
```toml
defaultEntryPoints = ["http"]
[entryPoints]
[entryPoints.http]
address = ":80"
[entryPoints.ping]
address = ":8082"
[backends]
[backends.traefik]
[backends.traefik.servers.server1]
url = "http://localhost:8080"
weight = 10
[frontends]
[frontends.traefikadmin]
backend = "traefik"
entrypoints = ["ping"]
[frontends.traefikadmin.routes.ping]
rule = "Path:/ping"
```
The above is similar to the previous example, but instead of enabling `/ping` on the _default_ entrypoint, we enable it on a _dedicated_ entrypoint.
In the above example, you would access a regular path, admin panel and health-check as follows:
* Regular path: `http://hostname:80/foo`
* Admin panel: `http://hostname:8080/`
* Ping URL: `http://hostname:8082/ping`
Note the dedicated port `:8082` for `/ping`.
In the above example, it is _very_ important to create a named dedicated entrypoint, and do **not** include it in `defaultEntryPoints`.
Otherwise, you are likely to expose _all_ services via that entrypoint.
In the above example, we have two entrypoints, `http` and `ping`, but we only included `http` in `defaultEntryPoints`, while explicitly tying `frontend.traefikadmin` to the `ping` entrypoint.
This ensures that all the "normal" frontends will be exposed via entrypoint `http` and _not_ via entrypoint `ping`.
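The corresponding check now targets the dedicated port instead (again a sketch with the example hostname; port `8080` stays unexposed to the outside world):

```bash
curl -i http://hostname:8082/ping
```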

docs/user-guide/grpc.md (new file, 148 lines)

@ -0,0 +1,148 @@
# gRPC example
This section explains how to use Traefik as a reverse proxy for a gRPC application with self-signed certificates.
!!! warning
As gRPC needs HTTP2, we need valid HTTPS certificates on both gRPC Server and Træfik.
<p align="center">
<img src="/img/grpc.svg" alt="gRPC architecture" title="gRPC architecture" />
</p>
## gRPC Server certificate
In order to secure the gRPC server, we generate a self-signed certificate for the backend URL:
```bash
openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout ./backend.key -out ./backend.cert
```
```
That will prompt for information; the important answer is:
```
Common Name (e.g. server FQDN or YOUR name) []: backend.local
```
## gRPC Client certificate
Generate your self-signed certificate for the frontend URL:
```bash
openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout ./frontend.key -out ./frontend.cert
```
```
with
```
Common Name (e.g. server FQDN or YOUR name) []: frontend.local
```
## Træfik configuration
Finally, we configure our Træfik instance to use both self-signed certificates.
```toml
defaultEntryPoints = ["https"]
# For secure connection on backend.local
RootCAs = [ "./backend.cert" ]
[entryPoints]
[entryPoints.https]
address = ":4443"
[entryPoints.https.tls]
# For secure connection on frontend.local
[[entryPoints.https.tls.certificates]]
certFile = "./frontend.cert"
keyFile = "./frontend.key"
[web]
address = ":8080"
[file]
[backends]
[backends.backend1]
[backends.backend1.servers.server1]
# Access on backend with HTTPS
url = "https://backend.local:8080"
[frontends]
[frontends.frontend1]
backend = "backend1"
[frontends.frontend1.routes.test_1]
rule = "Host:frontend.local"
```
## Conclusion
We don't need any specific configuration to use gRPC in Træfik; we just need to be careful that all the exchanges (between client and Træfik, and between Træfik and backend) are valid HTTPS communications (without `InsecureSkipVerify` enabled) because gRPC uses HTTP2.
## A gRPC example in go
We will use the gRPC greeter example in [grpc-go](https://github.com/grpc/grpc-go/tree/master/examples/helloworld)
!!! warning
In order to use this gRPC example, we need to modify it to use HTTPS.
So we modify the "gRPC server example" to use our own self-signed certificate:
```go
// ...
// Read cert and key file
// ioutil.ReadFile returns (data, error); errors are discarded here for brevity
BackendCert, _ := ioutil.ReadFile("./backend.cert")
BackendKey, _ := ioutil.ReadFile("./backend.key")
// Generate Certificate struct
cert, err := tls.X509KeyPair(BackendCert, BackendKey)
if err != nil {
return err
}
// Create credentials
creds := credentials.NewServerTLSFromCert(&cert)
// Use Credentials in gRPC server options
serverOption := grpc.Creds(creds)
var s *grpc.Server = grpc.NewServer(serverOption)
defer s.Stop()
helloworld.RegisterGreeterServer(s, &myserver{})
err = s.Serve(lis)
// ...
```
Next we will modify gRPC Client to use our Træfik self-signed certificate:
```go
// ...
// Read cert file
FrontendCert, _ := ioutil.ReadFile("./frontend.cert")
// Create CertPool
roots := x509.NewCertPool()
roots.AppendCertsFromPEM(FrontendCert)
// Create credentials
credsClient := credentials.NewClientTLSFromCert(roots, "")
// Dial with specific Transport (with credentials); the target is host:port and must match the certificate CN
conn, err := grpc.Dial("frontend.local:4443", grpc.WithTransportCredentials(credsClient))
if err != nil {
return err
}
defer conn.Close()
client := helloworld.NewGreeterClient(conn)
name := "World"
r, err := client.SayHello(context.Background(), &helloworld.HelloRequest{Name: name})
// ...
```


@ -1,6 +1,7 @@
# Kubernetes Ingress Controller # Kubernetes Ingress Controller
This guide explains how to use Træfik as an Ingress controller in a Kubernetes cluster. This guide explains how to use Træfik as an Ingress controller in a Kubernetes cluster.
If you are not familiar with Ingresses in Kubernetes you might want to read the [Kubernetes user guide](https://kubernetes.io/docs/concepts/services-networking/ingress/) If you are not familiar with Ingresses in Kubernetes you might want to read the [Kubernetes user guide](https://kubernetes.io/docs/concepts/services-networking/ingress/)
The config files used in this guide can be found in the [examples directory](https://github.com/containous/traefik/tree/master/examples/k8s) The config files used in this guide can be found in the [examples directory](https://github.com/containous/traefik/tree/master/examples/k8s)
@ -72,9 +73,10 @@ kubectl apply -f https://raw.githubusercontent.com/containous/traefik/master/exa
It is possible to use Træfik with a [Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) or a [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) object, It is possible to use Træfik with a [Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) or a [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) object,
whereas both options have their own pros and cons: whereas both options have their own pros and cons:
- The scalability is much better when using a Deployment, because you will have a Single-Pod-per-Node model when using the DaemonSet.
- It is possible to exclusively run a Service on a dedicated set of machines using taints and tolerations with a DaemonSet.
- On the other hand, the DaemonSet allows you to access any Node directly on ports 80 and 443, whereas with a Deployment you have to set up a [Service](https://kubernetes.io/docs/concepts/services-networking/service/) object.
The Deployment object looks like this:
@ -131,7 +133,8 @@ spec:
``` ```
[examples/k8s/traefik-deployment.yaml](https://github.com/containous/traefik/tree/master/examples/k8s/traefik-deployment.yaml) [examples/k8s/traefik-deployment.yaml](https://github.com/containous/traefik/tree/master/examples/k8s/traefik-deployment.yaml)
> The Service will expose two NodePorts which allow access to the ingress and the web interface. !!! note
The Service will expose two NodePorts which allow access to the ingress and the web interface.
The DaemonSet object does not look much different:
@ -198,20 +201,20 @@ spec:
To deploy Træfik to your cluster start by submitting one of the YAML files to the cluster with `kubectl`: To deploy Træfik to your cluster start by submitting one of the YAML files to the cluster with `kubectl`:
```shell ```shell
$ kubectl apply -f https://raw.githubusercontent.com/containous/traefik/master/examples/k8s/traefik-deployment.yaml kubectl apply -f https://raw.githubusercontent.com/containous/traefik/master/examples/k8s/traefik-deployment.yaml
``` ```
```shell ```shell
$ kubectl apply -f https://raw.githubusercontent.com/containous/traefik/master/examples/k8s/traefik-ds.yaml kubectl apply -f https://raw.githubusercontent.com/containous/traefik/master/examples/k8s/traefik-ds.yaml
``` ```
There are some significant differences between using Deployments and DaemonSets:

- The Deployment has easier up and down scaling possibilities.
  It can implement full pod lifecycle and supports rolling updates from Kubernetes 1.2.
  At least one Pod is needed to run the Deployment.
- The DaemonSet automatically scales to all nodes that meet a specific selector and guarantees to fill nodes one at a time.
  Rolling updates are fully supported from Kubernetes 1.7 for DaemonSets as well.
### Check the Pods ### Check the Pods
@ -220,8 +223,10 @@ Now lets check if our command was successful.
Start by listing the pods in the `kube-system` namespace: Start by listing the pods in the `kube-system` namespace:
```shell ```shell
$ kubectl --namespace=kube-system get pods kubectl --namespace=kube-system get pods
```
```
NAME READY STATUS RESTARTS AGE NAME READY STATUS RESTARTS AGE
kube-addon-manager-minikubevm 1/1 Running 0 4h kube-addon-manager-minikubevm 1/1 Running 0 4h
kubernetes-dashboard-s8krj 1/1 Running 0 4h kubernetes-dashboard-s8krj 1/1 Running 0 4h
@ -231,14 +236,17 @@ traefik-ingress-controller-678226159-eqseo 1/1 Running 0 7m
You should see that after submitting the Deployment or DaemonSet to Kubernetes it has launched a Pod, and it is now running. You should see that after submitting the Deployment or DaemonSet to Kubernetes it has launched a Pod, and it is now running.
_It might take a few moments for kubernetes to pull the Træfik image and start the container._ _It might take a few moments for kubernetes to pull the Træfik image and start the container._
> You could also check the deployment with the Kubernetes dashboard, run !!! note
> `minikube dashboard` to open it in your browser, then choose the `kube-system` You could also check the deployment with the Kubernetes dashboard, run
> namespace from the menu at the top right of the screen. `minikube dashboard` to open it in your browser, then choose the `kube-system`
namespace from the menu at the top right of the screen.
You should now be able to access Træfik on port 80 of your Minikube instance when using the DaemonSet: You should now be able to access Træfik on port 80 of your Minikube instance when using the DaemonSet:
```sh ```sh
curl $(minikube ip) curl $(minikube ip)
```
```
404 page not found 404 page not found
``` ```
@ -246,20 +254,24 @@ If you decided to use the deployment, then you need to target the correct NodePo
```sh ```sh
curl $(minikube ip):<NODEPORT> curl $(minikube ip):<NODEPORT>
```
```
404 page not found 404 page not found
``` ```
> We expect to see a 404 response here as we haven't yet given Træfik any configuration. !!! note
We expect to see a 404 response here as we haven't yet given Træfik any configuration.
## Deploy Træfik using Helm Chart ## Deploy Træfik using Helm Chart
Instead of installing Træfik via Kubernetes objects of your own, you can also use the Træfik Helm chart.
This allows more complex configuration via a Kubernetes [ConfigMap](https://kubernetes.io/docs/tasks/configure-pod-container/configmap/) and enables TLS certificates.
Install Træfik chart by: Install Træfik chart by:
```shell ```shell
$ helm install stable/traefik helm install stable/traefik
``` ```
For more information, check out [the doc](https://github.com/kubernetes/charts/tree/master/stable/traefik). For more information, check out [the doc](https://github.com/kubernetes/charts/tree/master/stable/traefik).
@ -305,9 +317,8 @@ kubectl apply -f https://raw.githubusercontent.com/containous/traefik/master/exa
Now let's set up an entry in our /etc/hosts file to route `traefik-ui.minikube` to our cluster.
> In production you would want to set up real dns entries. In production you would want to set up real dns entries.
You can get the ip address of your minikube instance by running `minikube ip`
> You can get the ip address of your minikube instance by running `minikube ip`
```shell ```shell
echo "$(minikube ip) traefik-ui.minikube" | sudo tee -a /etc/hosts echo "$(minikube ip) traefik-ui.minikube" | sudo tee -a /etc/hosts
@ -346,13 +357,6 @@ spec:
containers: containers:
- name: cheese - name: cheese
image: errm/cheese:stilton image: errm/cheese:stilton
resources:
requests:
cpu: 100m
memory: 50Mi
limits:
cpu: 100m
memory: 50Mi
ports: ports:
- containerPort: 80 - containerPort: 80
--- ---
@ -379,13 +383,6 @@ spec:
containers: containers:
- name: cheese - name: cheese
image: errm/cheese:cheddar image: errm/cheese:cheddar
resources:
requests:
cpu: 100m
memory: 50Mi
limits:
cpu: 100m
memory: 50Mi
ports: ports:
- containerPort: 80 - containerPort: 80
--- ---
@ -412,13 +409,6 @@ spec:
containers: containers:
- name: cheese - name: cheese
image: errm/cheese:wensleydale image: errm/cheese:wensleydale
resources:
requests:
cpu: 100m
memory: 50Mi
limits:
cpu: 100m
memory: 50Mi
ports: ports:
- containerPort: 80 - containerPort: 80
``` ```
@ -474,8 +464,8 @@ spec:
task: wensleydale task: wensleydale
``` ```
> Notice that we also set a [circuit breaker expression](https://docs.traefik.io/basics/#backends) for one of the backends !!! note
> by setting the `traefik.backend.circuitbreaker` annotation on the service. We also set a [circuit breaker expression](/basics/#backends) for one of the backends by setting the `traefik.backend.circuitbreaker` annotation on the service.
[examples/k8s/cheese-services.yaml](https://github.com/containous/traefik/tree/master/examples/k8s/cheese-services.yaml) [examples/k8s/cheese-services.yaml](https://github.com/containous/traefik/tree/master/examples/k8s/cheese-services.yaml)
@ -519,13 +509,15 @@ spec:
``` ```
[examples/k8s/cheese-ingress.yaml](https://github.com/containous/traefik/tree/master/examples/k8s/cheese-ingress.yaml) [examples/k8s/cheese-ingress.yaml](https://github.com/containous/traefik/tree/master/examples/k8s/cheese-ingress.yaml)
> Notice that we list each hostname, and add a backend service. !!! note
we list each hostname, and add a backend service.
```shell ```shell
kubectl apply -f https://raw.githubusercontent.com/containous/traefik/master/examples/k8s/cheese-ingress.yaml kubectl apply -f https://raw.githubusercontent.com/containous/traefik/master/examples/k8s/cheese-ingress.yaml
``` ```
Now visit the [Træfik dashboard](http://traefik-ui.minikube/) and you should see a frontend for each host. Along with a backend listing for each service with a Server set up for each pod. Now visit the [Træfik dashboard](http://traefik-ui.minikube/) and you should see a frontend for each host.
Along with a backend listing for each service with a Server set up for each pod.
If you edit your `/etc/hosts` again you should be able to access the cheese websites in your browser. If you edit your `/etc/hosts` again you should be able to access the cheese websites in your browser.
@ -543,7 +535,6 @@ Now lets suppose that our fictional client has decided that while they are super
No problem, we say, why don't we reconfigure the sites to host all 3 under one domain. No problem, we say, why don't we reconfigure the sites to host all 3 under one domain.
```yaml ```yaml
apiVersion: extensions/v1beta1 apiVersion: extensions/v1beta1
kind: Ingress kind: Ingress
@ -572,9 +563,8 @@ spec:
``` ```
[examples/k8s/cheeses-ingress.yaml](https://github.com/containous/traefik/tree/master/examples/k8s/cheeses-ingress.yaml) [examples/k8s/cheeses-ingress.yaml](https://github.com/containous/traefik/tree/master/examples/k8s/cheeses-ingress.yaml)
> Notice that we are configuring Træfik to strip the prefix from the url path !!! note
> with the `traefik.frontend.rule.type` annotation so that we can use we are configuring Træfik to strip the prefix from the url path with the `traefik.frontend.rule.type` annotation so that we can use the containers from the previous example without modification.
> the containers from the previous example without modification.
```shell ```shell
kubectl apply -f https://raw.githubusercontent.com/containous/traefik/master/examples/k8s/cheeses-ingress.yaml kubectl apply -f https://raw.githubusercontent.com/containous/traefik/master/examples/k8s/cheeses-ingress.yaml
@ -632,18 +622,20 @@ spec:
## Forwarding to ExternalNames ## Forwarding to ExternalNames
When specifying an [ExternalName](https://kubernetes.io/docs/concepts/services-networking/service/#services-without-selectors), When specifying an [ExternalName](https://kubernetes.io/docs/concepts/services-networking/service/#services-without-selectors),
Træfik will forward requests to the given host accordingly and use HTTPS when the Service port matches 443. Træfik will forward requests to the given host accordingly and use HTTPS when the Service port matches 443.
This still requires setting up a proper port mapping on the Service from the Ingress port to the (external) Service port. This still requires setting up a proper port mapping on the Service from the Ingress port to the (external) Service port.
## Disable passing the Host header ## Disable passing the Host header
By default Træfik will pass the incoming Host header on to the upstream resource. By default Træfik will pass the incoming Host header on to the upstream resource.
There are times however where you may not want this to be the case. There are times however where you may not want this to be the case.
For example if your service is of the ExternalName type. For example if your service is of the ExternalName type.
### Disable entirely ### Disable entirely
Add the following to your toml config: Add the following to your toml config:
```toml ```toml
disablePassHostHeaders = true disablePassHostHeaders = true
``` ```
@ -653,6 +645,7 @@ disablePassHostHeaders = true
To disable passing the Host header per ingress resource set the `traefik.frontend.passHostHeader` annotation on your ingress to `false`. To disable passing the Host header per ingress resource set the `traefik.frontend.passHostHeader` annotation on your ingress to `false`.
Here is an example ingress definition: Here is an example ingress definition:
```yaml ```yaml
apiVersion: extensions/v1beta1 apiVersion: extensions/v1beta1
kind: Ingress kind: Ingress
@ -673,6 +666,7 @@ spec:
``` ```
And an example service definition: And an example service definition:
```yaml ```yaml
apiVersion: v1 apiVersion: v1
kind: Service kind: Service
@ -696,8 +690,20 @@ If you were to visit `example.com/static` the request would then be passed onto
## Excluding an ingress from Træfik ## Excluding an ingress from Træfik
You can control which ingress Træfik cares about by using the `kubernetes.io/ingress.class` annotation. You can control which ingress Træfik cares about by using the `kubernetes.io/ingress.class` annotation.
By default if the annotation is not set at all Træfik will include the ingress. By default if the annotation is not set at all Træfik will include the ingress.
If the annotation is set to anything other than traefik or a blank string Træfik will ignore it. If the annotation is set to anything other than traefik or a blank string Træfik will ignore it.
![](https://i.giphy.com/ujUdrdpX7Ok5W.gif) ## Production advice
### Resource limitations
The examples shown deliberately do not specify any [resource limitations](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/) as there is no one size fits all.
In a production environment, however, it is important to set proper bounds, especially with regards to CPU:
- too strict and Traefik will be throttled while serving requests (as Kubernetes imposes hard quotas)
- too loose and Traefik may waste resources not available for other containers
When in doubt, you should measure your resource needs, and adjust requests and limits accordingly.
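As a purely illustrative sketch (the numbers are placeholders, not recommendations), such bounds could be declared on the Traefik container like this:

```yaml
containers:
- name: traefik-ingress-lb
  image: traefik
  resources:
    requests:
      cpu: 100m    # guaranteed share
      memory: 20Mi
    limits:
      cpu: 200m    # hard quota; too strict and Traefik gets throttled
      memory: 30Mi
```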


@ -14,13 +14,16 @@ Træfik supports several Key-value stores:
## Static configuration in Key-value store ## Static configuration in Key-value store
We will see the steps to set it up with an easy example. We will see the steps to set it up with an easy example.
Note that we could do the same with any other Key-value Store.
## docker-compose file for Consul !!! note
We could do the same with any other Key-value Store.
### docker-compose file for Consul
The Træfik global configuration will be fetched from a [Consul](https://consul.io) store.
First we have to launch Consul in a container. First we have to launch Consul in a container.
The [docker-compose file](https://docs.docker.com/compose/compose-file/) allows us to launch Consul and four instances of the trivial app [emilevauge/whoamI](https://github.com/emilevauge/whoamI) : The [docker-compose file](https://docs.docker.com/compose/compose-file/) allows us to launch Consul and four instances of the trivial app [emilevauge/whoamI](https://github.com/emilevauge/whoamI) :
```yaml ```yaml
@ -51,12 +54,12 @@ whoami4:
image: emilevauge/whoami image: emilevauge/whoami
``` ```
## Upload the configuration in the Key-value store ### Upload the configuration in the Key-value store
We should now fill the store with the Træfik global configuration, as we do with a [TOML file configuration](/toml). We should now fill the store with the Træfik global configuration, as we do with a [TOML file configuration](/toml).
To do that, we can send the Key-value pairs via [curl commands](https://www.consul.io/intro/getting-started/kv.html) or via the [Web UI](https://www.consul.io/intro/getting-started/ui.html). To do that, we can send the Key-value pairs via [curl commands](https://www.consul.io/intro/getting-started/kv.html) or via the [Web UI](https://www.consul.io/intro/getting-started/ui.html).
Fortunately, Træfik allows automation of this process using the `storeconfig` subcommand. Fortunately, Træfik allows automation of this process using the `storeconfig` subcommand.
Please refer to the [store Træfik configuration](/user-guide/kv-config/#store-configuration-in-key-value-store) section to get documentation on it. Please refer to the [store Træfik configuration](/user-guide/kv-config/#store-configuration-in-key-value-store) section to get documentation on it.
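For example, setting a single key by hand over Consul's HTTP API could look like this (a hypothetical key/value pair, shown only to illustrate the manual route; `storeconfig` automates the whole upload):

```bash
# Store the value ":8080" under the key traefik/web/address
curl --request PUT --data ":8080" http://127.0.0.1:8500/v1/kv/traefik/web/address
```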
Here is the toml configuration we would like to store in the Key-value Store : Here is the toml configuration we would like to store in the Key-value Store :
@ -83,7 +86,6 @@ defaultEntryPoints = ["http", "https"]
<key file content> <key file content>
-----END CERTIFICATE-----""" -----END CERTIFICATE-----"""
[consul] [consul]
endpoint = "127.0.0.1:8500" endpoint = "127.0.0.1:8500"
watch = true watch = true
@ -118,9 +120,10 @@ In case you are setting key values manually:
Note that we can either give path to certificate file or directly the file content itself. Note that we can either give path to certificate file or directly the file content itself.
## Launch Træfik ### Launch Træfik
We will now launch Træfik in a container. We will now launch Træfik in a container.
We use CLI flags to setup the connection between Træfik and Consul. We use CLI flags to setup the connection between Træfik and Consul.
All the rest of the global configuration is stored in Consul. All the rest of the global configuration is stored in Consul.
@ -138,21 +141,23 @@ traefik:
!!! warning !!! warning
Be careful to give the correct IP address and port in the flag `--consul.endpoint`. Be careful to give the correct IP address and port in the flag `--consul.endpoint`.
## Consul ACL Token support ### Consul ACL Token support
To specify a Consul ACL token for Traefik, we have to set a System Environment variable named `CONSUL_HTTP_TOKEN` prior to starting traefik. This variable must be initialized with the ACL token value. To specify a Consul ACL token for Traefik, we have to set a System Environment variable named `CONSUL_HTTP_TOKEN` prior to starting Traefik.
This variable must be initialized with the ACL token value.
If Traefik is launched into a Docker container, the variable `CONSUL_HTTP_TOKEN` can be initialized with the `-e` Docker option : `-e "CONSUL_HTTP_TOKEN=[consul-acl-token-value]"` If Traefik is launched into a Docker container, the variable `CONSUL_HTTP_TOKEN` can be initialized with the `-e` Docker option : `-e "CONSUL_HTTP_TOKEN=[consul-acl-token-value]"`
## TLS support ### TLS support
To connect to a Consul endpoint using SSL, simply specify `https://` in the `consul.endpoint` property To connect to a Consul endpoint using SSL, simply specify `https://` in the `consul.endpoint` property
- `--consul.endpoint=https://[consul-host]:[consul-ssl-port]` - `--consul.endpoint=https://[consul-host]:[consul-ssl-port]`
## TLS support with client certificates ### TLS support with client certificates
So far, only [Consul](https://consul.io) and [etcd](https://coreos.com/etcd/) support TLS connections with client certificates. So far, only [Consul](https://consul.io) and [etcd](https://coreos.com/etcd/) support TLS connections with client certificates.
To set it up, we should enable [consul security](https://www.consul.io/docs/internals/security.html) (or [etcd security](https://coreos.com/etcd/docs/latest/security.html)). To set it up, we should enable [consul security](https://www.consul.io/docs/internals/security.html) (or [etcd security](https://coreos.com/etcd/docs/latest/security.html)).
Then, we have to provide CA, Cert and Key to Træfik using `consul` flags : Then, we have to provide CA, Cert and Key to Træfik using `consul` flags :
@ -169,18 +174,20 @@ Or etcd flags :
- `--etcd.tls.cert=path/to/the/file` - `--etcd.tls.cert=path/to/the/file`
- `--etcd.tls.key=path/to/the/file` - `--etcd.tls.key=path/to/the/file`
!!! note
We can either give the file content itself directly (instead of the path to the certificate) in a TOML file configuration.
Remember the command `traefik --help` to display the updated list of flags. Remember the command `traefik --help` to display the updated list of flags.
# Dynamic configuration in Key-value store ## Dynamic configuration in Key-value store
Following our example, we will provide backends/frontends rules to Træfik. Following our example, we will provide backends/frontends rules to Træfik.
Note that this section is independent of the way Træfik got its static configuration. !!! note
It means that the static configuration can either come from the same Key-value store or from any other sources. This section is independent of the way Træfik got its static configuration.
It means that the static configuration can either come from the same Key-value store or from any other sources.
## Key-value storage structure ### Key-value storage structure
Here is the toml configuration we would like to store in the store : Here is the toml configuration we would like to store in the store :
@ -272,14 +279,15 @@ And there, the same dynamic configuration in a KV Store (using `prefix = "traefi
| `/traefik/frontends/frontend2/entrypoints` | `http,https` | | `/traefik/frontends/frontend2/entrypoints` | `http,https` |
| `/traefik/frontends/frontend2/routes/test_2/rule` | `PathPrefix:/test` | | `/traefik/frontends/frontend2/routes/test_2/rule` | `PathPrefix:/test` |
## Atomic configuration changes ### Atomic configuration changes
Træfik can watch the backends/frontends configuration changes and generate its configuration automatically. Træfik can watch the backends/frontends configuration changes and generate its configuration automatically.
!!! note
Only the backends/frontends rules are dynamic; the rest of the Træfik configuration stays static.
The [Etcd](https://github.com/coreos/etcd/issues/860) and [Consul](https://github.com/hashicorp/consul/issues/886) backends do not support updating multiple keys atomically. The [Etcd](https://github.com/coreos/etcd/issues/860) and [Consul](https://github.com/hashicorp/consul/issues/886) backends do not support updating multiple keys atomically.
As a result, it may be possible for Træfik to read an intermediate configuration state despite judicious use of the `--providersThrottleDuration` flag. As a result, it may be possible for Træfik to read an intermediate configuration state despite judicious use of the `--providersThrottleDuration` flag.
To solve this problem, Træfik supports a special key called `/traefik/alias`. To solve this problem, Træfik supports a special key called `/traefik/alias`.
If set, Træfik uses the value as an alternative key prefix.
@ -292,6 +300,7 @@ Given the key structure below, Træfik will use the `http://172.17.0.2:80` as it
| `/traefik_configurations/1/backends/backend1/servers/server1/weight` | `10` | | `/traefik_configurations/1/backends/backend1/servers/server1/weight` | `10` |
When an atomic configuration change is required, you may write a new configuration at an alternative prefix. When an atomic configuration change is required, you may write a new configuration at an alternative prefix.
Here, although the `/traefik_configurations/2/...` keys have been set, the old configuration is still active because the `/traefik/alias` key still points to `/traefik_configurations/1`: Here, although the `/traefik_configurations/2/...` keys have been set, the old configuration is still active because the `/traefik/alias` key still points to `/traefik_configurations/1`:
| Key | Value | | Key | Value |
@ -305,6 +314,7 @@ Here, although the `/traefik_configurations/2/...` keys have been set, the old c
| `/traefik_configurations/2/backends/backend1/servers/server2/weight` | `5` |

Once the `/traefik/alias` key is updated, the new `/traefik_configurations/2` configuration becomes active atomically.
Here, we have a 50% balance between the `http://172.17.0.3:80` and the `http://172.17.0.4:80` hosts while no traffic is sent to the `172.17.0.2:80` host:
| Key | Value |
@ -317,22 +327,25 @@ Here, we have a 50% balance between the `http://172.17.0.3:80` and the `http://1
| `/traefik_configurations/2/backends/backend1/servers/server2/url` | `http://172.17.0.4:80` |
| `/traefik_configurations/2/backends/backend1/servers/server2/weight` | `5` |
!!! note
    Træfik *will not watch for key changes in the `/traefik_configurations` prefix*. It will only watch for changes in `/traefik/alias`.
    Further, if the `/traefik/alias` key is set, all other configurations with a `/traefik/backends` or `/traefik/frontends` prefix are ignored.
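As a sketch of the workflow described above, the new tree is written first and the alias is flipped last. This reuses a libkv store handle as in the earlier example; key names and URLs come from the tables above, while the `server1` weight is assumed equal to `server2` for the 50/50 split.

```go
package main

import (
	"log"

	"github.com/docker/libkv/store"
)

// switchConfiguration writes a complete configuration tree under an
// alternative prefix and only then flips the alias key to it.
func switchConfiguration(kv store.Store) {
	// 1. Write the new tree first; Træfik does not watch this prefix.
	newConf := map[string]string{
		"traefik_configurations/2/backends/backend1/servers/server1/url":    "http://172.17.0.3:80",
		"traefik_configurations/2/backends/backend1/servers/server1/weight": "5", // assumed, for the 50/50 split
		"traefik_configurations/2/backends/backend1/servers/server2/url":    "http://172.17.0.4:80",
		"traefik_configurations/2/backends/backend1/servers/server2/weight": "5",
	}
	for k, v := range newConf {
		if err := kv.Put(k, []byte(v), nil); err != nil {
			log.Fatal(err)
		}
	}

	// 2. Flip the alias last: Træfik watches only this key, so the whole
	// configuration becomes active in a single step.
	if err := kv.Put("traefik/alias", []byte("/traefik_configurations/2"), nil); err != nil {
		log.Fatal(err)
	}
}
```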
## Store configuration in Key-value store

!!! note
    Don't forget to [set up the connection between Træfik and the Key-value store](/user-guide/kv-config/#launch-trfk).
The static Træfik configuration in a key-value store can be automatically created and updated, using the [`storeconfig` subcommand](/basics/#commands).

```bash
traefik storeconfig [flags] ...
```
This command is here only to automate the [process which uploads the configuration into the Key-value store](/user-guide/kv-config/#upload-the-configuration-in-the-key-value-store).
Træfik will not start, but the [static configuration](/basics/#static-trfk-configuration) will be uploaded into the Key-value store.

If you configured ACME (Let's Encrypt), your registration account and your certificates will also be uploaded.
To upload your ACME certificates to the KV store, get your Traefik TOML file and add the new `storage` option in the `acme` section:
```toml
[acme]

View file

@ -2,7 +2,7 @@
This guide explains how to integrate Marathon and operate the cluster in a reliable way from Traefik's standpoint.

## Host detection

Marathon offers multiple ways to run (Docker-containerized) applications, the most popular ones being
@ -14,9 +14,11 @@ Traefik tries to detect the configured mode and route traffic to the right IP ad
Given the complexity of the subject, it is possible that the heuristic fails.
Apart from filing an issue and waiting for the feature request / bug report to get addressed, one workaround for such situations is to customize the Marathon template file to the individual needs.

!!! note
    This does _not_ require rebuilding Traefik; it only requires pointing the `filename` configuration parameter to a customized version of the `marathon.tmpl` file on Traefik startup.

## Port detection
Traefik also attempts to determine the right port (which is a [non-trivial matter in Marathon](https://mesosphere.github.io/marathon/docs/ports.html)).
Following is the order by which Traefik tries to identify the port (the first one that yields a positive result will be used):
@ -26,9 +28,9 @@ Following is the order by which Traefik tries to identify the port (the first on
1. The port from the application's `portDefinitions` field (possibly indexed through the `traefik.portIndex` label, otherwise the first one).
1. The port from the application's `ipAddressPerTask` field (possibly indexed through the `traefik.portIndex` label, otherwise the first one).
## Achieving high availability

### Scenarios

There are three scenarios where the availability of a Marathon application could be impaired along with the risk of losing or failing requests:
@ -36,27 +38,29 @@ There are three scenarios where the availability of a Marathon application could
- During the shutdown phase when Traefik still routes requests to the backend while the backend is already terminating.
- During a failure of the application when Traefik has not yet identified the backend as being erroneous.

The first two scenarios are common with every rolling upgrade of an application (i.e. a new version release or configuration update).
The following sub-sections describe how to resolve or mitigate each scenario.
#### Startup

It is possible to define [readiness checks](https://mesosphere.github.io/marathon/docs/readiness-checks.html) (available since Marathon version 1.1) per application and have Marathon take these into account during the startup phase.

The idea is that each application provides an HTTP endpoint that Marathon queries periodically during an ongoing deployment in order to mark the associated readiness check result as successful if and only if the endpoint returns a response within the configured HTTP code range.
As long as the check keeps failing, Marathon will not proceed with the deployment (within the configured upgrade strategy bounds).
Beginning with version 1.4, Traefik respects readiness check results if the Traefik option is set and checks are configured on the applications accordingly.

!!! note
    Due to the way readiness check results are currently exposed by the Marathon API, ready tasks may be taken into rotation with a small delay.
    It is on the order of one readiness check timeout interval (as configured on the application specification) and guarantees that non-ready tasks do not receive traffic prematurely.
If readiness checks are not possible, a current mitigation strategy is to enable [retries](/configuration/commons#retry-configuration) and make sure that a sufficient number of healthy application tasks exist so that one retry will likely hit one of those.
Apart from its probabilistic nature, the workaround comes at the price of increased latency.
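For reference, the application-side readiness endpoint can be as small as the following sketch; the path (`/ready`), port, and success code are placeholders that must match whatever the Marathon readiness check is configured with.

```go
package main

import (
	"log"
	"net/http"
	"sync/atomic"
)

// ready flips to 1 once the application has finished warming up.
var ready int32

func readinessHandler(w http.ResponseWriter, r *http.Request) {
	if atomic.LoadInt32(&ready) == 1 {
		w.WriteHeader(http.StatusOK) // Marathon marks the readiness check as successful
		return
	}
	w.WriteHeader(http.StatusServiceUnavailable) // deployment keeps waiting while this is returned
}

func main() {
	http.HandleFunc("/ready", readinessHandler)

	go func() {
		// ... perform warm-up work here (cache priming, opening connections, ...) ...
		atomic.StoreInt32(&ready, 1)
	}()

	log.Fatal(http.ListenAndServe(":8081", nil))
}
```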
#### Shutdown

It is possible to install a [termination handler](https://mesosphere.github.io/marathon/docs/health-checks.html) (available since Marathon version 1.3) with each application whose responsibility it is to delay the shutdown process long enough until the backend has been taken out of load-balancing rotation with reasonable confidence (i.e., Traefik has received an update from the Marathon event bus, recomputes the available Marathon backends, and applies the new configuration).

Specifically, each termination handler should install a signal handler listening for a SIGTERM signal and implement the following steps on signal reception:

1. Disable Keep-Alive HTTP connections.
@ -70,12 +74,13 @@ Traefik already ignores Marathon tasks whose state does not match `TASK_RUNNING`
How long HTTP requests should continue to be accepted in step 2 depends on how long Traefik needs to receive and process the Marathon configuration update.
Under regular operational conditions, it should be on the order of seconds, with 10 seconds possibly being a good default value.

Again, configuring Traefik to do retries (as discussed in the previous section) can serve as a decent workaround strategy.
Paired with termination handlers, they would cover for those cases where either the termination sequence or Traefik cannot complete their part of the orchestration process in time.
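A minimal Go sketch of such a termination handler follows; the 10-second delay mirrors the default suggested above, and the port and timeout values are arbitrary placeholders to be tuned per deployment.

```go
package main

import (
	"context"
	"net/http"
	"os"
	"os/signal"
	"syscall"
	"time"
)

func main() {
	srv := &http.Server{Addr: ":8080"} // placeholder application server

	done := make(chan struct{})
	go func() {
		sigs := make(chan os.Signal, 1)
		signal.Notify(sigs, syscall.SIGTERM)
		<-sigs

		// Step 1: stop handing out keep-alive connections.
		srv.SetKeepAlivesEnabled(false)

		// Step 2: keep accepting requests long enough for Traefik to receive
		// the Marathon update and drop this task (~10 seconds as a starting point).
		time.Sleep(10 * time.Second)

		// Step 3: finish in-flight requests, then stop the server.
		ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
		defer cancel()
		srv.Shutdown(ctx)
		close(done)
	}()

	srv.ListenAndServe()
	<-done
}
```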
#### Failure

A failing application always happens unexpectedly, and hence, it is very difficult or even impossible to rule out the adverse effects categorically.
Failure reasons vary broadly and could range from unacceptable slowness to a task crash or a network split.

There are two mitigation efforts:
@ -85,19 +90,22 @@ There are two mitigaton efforts:
The Marathon health check makes sure that applications once deemed dysfunctional are being rescheduled to different slaves.
However, they might take a while to get triggered and the follow-up processes to complete.

For that reason, the Traefik health check provides an additional check that responds more rapidly and does not require a configuration reload to happen.
Additionally, it protects against cases that the Marathon health check may not be able to cover, such as a network split.
### (Non-)Alternatives

There are a few alternatives of varying quality that are frequently asked for.
The remainder of this section explores them along with a benefit/cost trade-off.

#### Reusing Marathon health checks
It may seem obvious to reuse the Marathon health checks as a signal to Traefik whether an application should be taken into load-balancing rotation or not.

Apart from the increased latency a failing health check may have, a major problem with this is that Marathon does not persist the health check results.
Consequently, if a master re-election occurs in the Marathon cluster, all health check results will revert to the _unknown_ state, effectively causing all applications inside the cluster to become unavailable and leading to a complete cluster failure.
Re-elections do not only happen during regular maintenance work (often requiring rolling upgrades of the Marathon nodes) but also when the Marathon leader fails spontaneously.
As such, there is no way to handle this situation deterministically.
@ -106,11 +114,14 @@ Finally, Marathon health checks are not mandatory (the default is to use the tas
Traefik used to use the health check results as a strict requirement but moved away from it as [users reported the dramatic consequences](https://github.com/containous/traefik/issues/653).
If health check results are known to exist, however, they will be used to signal task availability.
#### Draining

Another common approach is to let a proxy drain backends that are supposed to shut down.
That is, once a backend is supposed to shut down, Traefik would stop forwarding requests.

On the plus side, this would not require any modifications to the application in question.
However, implementing this fully within Traefik seems like a non-trivial undertaking.
Additionally, the approach is less flexible compared to a custom termination handler since only the latter allows for the implementation of custom termination sequences that go beyond simple request draining (e.g., persisting a snapshot state to disk prior to terminating).

The feature is currently not implemented; a request for draining in general is at [issue 41](https://github.com/containous/traefik/issues/41).

View file

@ -17,8 +17,8 @@ The cluster consists of:
## Cluster provisioning

First, let's create all the required nodes.
It's a shorter version of the [swarm tutorial](https://docs.docker.com/engine/swarm/swarm-tutorial/).
```shell
docker-machine create -d virtualbox manager
@ -29,8 +29,8 @@ docker-machine create -d virtualbox worker2
Then, let's set up the cluster, in order:

1. initialize the cluster
1. get the token for other hosts to join
1. on both workers, join the cluster with the token
```shell
docker-machine ssh manager "docker swarm init \
@ -94,17 +94,19 @@ docker-machine ssh manager "docker service create \
Let's explain this command:

| Option | Description |
|-----------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------|
| `--publish 80:80 --publish 8080:8080` | we publish ports `80` and `8080` on the cluster. |
| `--constraint=node.role==manager` | we ask docker to schedule Træfik on a manager node. |
| `--mount type=bind,source=/var/run/docker.sock,target=/var/run/docker.sock` | we bind mount the docker socket where Træfik is scheduled to be able to speak to the daemon. |
| `--network traefik-net` | we attach the Træfik service (and thus the underlying container) to the `traefik-net` network. |
| `--docker` | enable docker backend, and `--docker.swarmmode` to enable the swarm mode on Træfik. |
| `--web` | activate the webUI on port 8080 |
## Deploy your apps

We can now deploy our app on the cluster, here [whoami](https://github.com/emilevauge/whoami), a simple web server in Go.
We start 2 services, on the `traefik-net` network.
```shell
docker-machine ssh manager "docker service create \
@ -121,9 +123,12 @@ docker-machine ssh manager "docker service create \
	emilevauge/whoami"
```
!!! note
    We set whoami1 to use sticky sessions (`--label traefik.backend.loadbalancer.sticky=true`).
    We'll demonstrate that later.
!!! note
    If using `docker stack deploy`, there is [a specific way that the labels must be defined in the docker-compose file](https://github.com/containous/traefik/issues/994#issuecomment-269095109).
Check that everything is scheduled and started:
@ -182,7 +187,8 @@ X-Forwarded-Proto: http
X-Forwarded-Server: 8fbc39271b4c
```
!!! note
    As Træfik is published, you can access it from any machine and not only the manager.
```shell
curl -H Host:whoami0.traefik http://$(docker-machine ip worker1)
@ -231,11 +237,9 @@ X-Forwarded-Server: 8fbc39271b4c
```shell
docker-machine ssh manager "docker service scale whoami0=5"
docker-machine ssh manager "docker service scale whoami1=5"
```

Check that we now have 5 replicas of each `whoami` service:

```shell
@ -298,7 +302,9 @@ X-Forwarded-Host: 10.0.9.4:80
X-Forwarded-Proto: http
X-Forwarded-Server: 8fbc39271b4c
```
Wait, I thought we added the sticky flag to `whoami1`?
Traefik relies on a cookie to maintain stickiness, so you'll need to test this with a browser.
First you need to add `whoami1.traefik` to your hosts file:

View file

@ -1,6 +1,7 @@
# Swarm cluster

This section explains how to create a multi-host [swarm](https://docs.docker.com/swarm) cluster using [docker-machine](https://docs.docker.com/machine/) and how to deploy Træfik on it.

The cluster consists of:

- 2 servers
@ -97,14 +98,17 @@ docker $(docker-machine config mhs-demo0) run \
Let's explain this command:

| Option | Description |
|-------------------------------------------|---------------------------------------------------------------|
| `-p 80:80 -p 8080:8080` | we bind ports 80 and 8080 |
| `--net=my-net` | run the container on the network my-net |
| `-v /var/lib/boot2docker/:/ssl` | mount the ssl keys generated by docker-machine |
| `-c /dev/null` | empty config file |
| `--docker` | enable docker backend |
| `--docker.endpoint=tcp://172.18.0.1:3376` | connect to the swarm master using the docker_gwbridge network |
| `--docker.tls` | enable TLS using the docker-machine keys |
| `--web` | activate the webUI on port 8080 |

## Deploy your apps

View file

@ -19,19 +19,19 @@ address = ":7888"
################################################################
# rules
################################################################
[backends]
  [backends.backend]
    [backends.backend.LoadBalancer]
      method = "drr"
    [backends.backend.servers.server1]
      url = "http://127.0.0.1:8081"
    [backends.backend.servers.server2]
      url = "http://127.0.0.1:8082"
    [backends.backend.servers.server3]
      url = "http://127.0.0.1:8083"
[frontends]
  [frontends.frontend]
    backend = "backend"
    passHostHeader = true
    [frontends.frontend.routes.test]
      rule = "Path: /test"

View file

@ -19,24 +19,24 @@ address = ":7888"
################################################################
# rules
################################################################
[backends]
  [backends.backend1]
    [backends.backend1.servers.server1]
      url = "http://127.0.0.1:8081"
  [backends.backend2]
    [backends.backend2.LoadBalancer]
      method = "drr"
    [backends.backend2.servers.server1]
      url = "http://127.0.0.1:8082"
    [backends.backend2.servers.server2]
      url = "http://127.0.0.1:8083"
[frontends]
  [frontends.frontend1]
    backend = "backend1"
    [frontends.frontend1.routes.test_1]
      rule = "Path: /test1"
  [frontends.frontend2]
    backend = "backend2"
    passHostHeader = true
    [frontends.frontend2.routes.test_2]
      rule = "Path: /test2"

View file

@ -6,7 +6,7 @@ defaultEntryPoints = ["http", "https"]
  [entryPoints.http]
    address = ":80"
    [entryPoints.http.redirect]
      entryPoint = "https"
  [entryPoints.https]
    address = ":443"
    [entryPoints.https.tls]
@ -22,7 +22,7 @@ caServer = "http://traefik.localhost.com:4000/directory"
[web]
address = ":8080"
[docker]
endpoint = "unix:///var/run/docker.sock"

10
glide.lock generated
View file

@ -1,4 +1,4 @@
hash: 857714f15cc10657ae2f15f3ab6592734a1ebc524d32b5353948258ce3043b6a hash: f5dd83cd0bcf9f38bf6916bc028e108c59aee57ea440e914bc68f2b90da227d3
updated: 2017-09-09T11:52:16.848940186+02:00 updated: 2017-09-09T11:52:16.848940186+02:00
imports: imports:
- name: cloud.google.com/go - name: cloud.google.com/go
@ -383,9 +383,7 @@ imports:
repo: https://github.com/ijc25/Gotty.git repo: https://github.com/ijc25/Gotty.git
vcs: git vcs: git
- name: github.com/NYTimes/gziphandler - name: github.com/NYTimes/gziphandler
version: 824b33f2a7457025697878c865c323f801118043 version: 97ae7fbaf81620fe97840685304a78a306a39c64
repo: https://github.com/containous/gziphandler.git
vcs: git
- name: github.com/ogier/pflag - name: github.com/ogier/pflag
version: 45c278ab3607870051a2ea9040bb85fcb8557481 version: 45c278ab3607870051a2ea9040bb85fcb8557481
- name: github.com/opencontainers/go-digest - name: github.com/opencontainers/go-digest
@ -784,7 +782,9 @@ testImports:
- name: github.com/flynn/go-shlex - name: github.com/flynn/go-shlex
version: 3f9db97f856818214da2e1057f8ad84803971cff version: 3f9db97f856818214da2e1057f8ad84803971cff
- name: github.com/go-check/check - name: github.com/go-check/check
version: 11d3bc7aa68e238947792f30573146a3231fc0f1 version: ca0bf163426aa183d03fd4949101785c0347f273
repo: https://github.com/containous/check.git
vcs: git
- name: github.com/gorilla/mux - name: github.com/gorilla/mux
version: e444e69cbd2e2e3e0749a2f3c717cec491552bbf version: e444e69cbd2e2e3e0749a2f3c717cec491552bbf
- name: github.com/libkermit/compose - name: github.com/libkermit/compose

View file

@ -80,9 +80,6 @@ import:
vcs: git vcs: git
- package: github.com/abbot/go-http-auth - package: github.com/abbot/go-http-auth
- package: github.com/NYTimes/gziphandler - package: github.com/NYTimes/gziphandler
version: ^v1002.0.0
repo: https://github.com/containous/gziphandler.git
vcs: git
- package: github.com/docker/leadership - package: github.com/docker/leadership
- package: github.com/satori/go.uuid - package: github.com/satori/go.uuid
version: ^1.1.0 version: ^1.1.0
@ -209,7 +206,9 @@ testImport:
- package: github.com/docker/libcompose - package: github.com/docker/libcompose
version: 1b708aac26a4fc6f9bff31728a8e3a252ef57dbd version: 1b708aac26a4fc6f9bff31728a8e3a252ef57dbd
- package: github.com/go-check/check - package: github.com/go-check/check
version: 11d3bc7aa68e238947792f30573146a3231fc0f1 version: fork-containous
repo: https://github.com/containous/check.git
vcs: git
- package: github.com/libkermit/compose - package: github.com/libkermit/compose
version: 4a33a16f1446ba205c4da7b09105d5bdc293b432 version: 4a33a16f1446ba205c4da7b09105d5bdc293b432
subpackages: subpackages:

View file

@ -35,7 +35,7 @@ type Options struct {
}

func (opt Options) String() string {
	return fmt.Sprintf("[Path: %s Port: %d Interval: %s]", opt.Path, opt.Port, opt.Interval)
}

// BackendHealthCheck HealthCheck configuration for a backend
@ -131,14 +131,14 @@ func checkBackend(currentBackend *BackendHealthCheck) {
}

func (backend *BackendHealthCheck) newRequest(serverURL *url.URL) (*http.Request, error) {
	if backend.Port == 0 {
		return http.NewRequest("GET", serverURL.String()+backend.Path, nil)
	}

	// copy the url and add the port to the host
	u := &url.URL{}
	*u = *serverURL
	u.Host = net.JoinHostPort(u.Hostname(), strconv.Itoa(backend.Port))
	u.Path = u.Path + backend.Path
	return http.NewRequest("GET", u.String(), nil)
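For illustration only (the addresses, port, and path below are made up), the host/port rewriting performed by this override behaves as follows in isolation:

```go
package main

import (
	"fmt"
	"net"
	"net/url"
	"strconv"
)

func main() {
	serverURL, _ := url.Parse("http://10.0.0.5:8080")
	healthPort := 9000 // hypothetical dedicated health-check port

	// Copy the server URL, swap in the health-check port, then append the path.
	u := *serverURL
	u.Host = net.JoinHostPort(u.Hostname(), strconv.Itoa(healthPort))
	u.Path = u.Path + "/health"

	fmt.Println(u.String()) // http://10.0.0.5:9000/health
}
```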

View file

@ -30,7 +30,8 @@ func (s *AccessLogSuite) TestAccessLog(c *check.C) {
	os.Remove(traefikTestLogFile)
	// Start Traefik
	cmd, display := s.traefikCmd(withConfigFile("fixtures/access_log_config.toml"))
	defer display(c)
	err := cmd.Start()
	c.Assert(err, checker.IsNil)
	defer cmd.Process.Kill()
@ -96,7 +97,7 @@ func CheckAccessLogFormat(c *check.C, line string, i int) {
	tokens, err := shellwords.Parse(line)
	c.Assert(err, checker.IsNil)
	c.Assert(tokens, checker.HasLen, 14)
	c.Assert(tokens[6], checker.Matches, `^(-|\d{3})$`)
	c.Assert(tokens[10], checker.Equals, fmt.Sprintf("%d", i+1))
	c.Assert(tokens[11], checker.HasPrefix, "frontend")
	c.Assert(tokens[12], checker.HasPrefix, "http://127.0.0.1:808")
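The relaxed status-code assertion above accepts either a three-digit code or a dash (for requests where no status was recorded); in isolation the pattern behaves like this:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern as in the assertion above.
	statusRe := regexp.MustCompile(`^(-|\d{3})$`)

	fmt.Println(statusRe.MatchString("200")) // true
	fmt.Println(statusRe.MatchString("-"))   // true
	fmt.Println(statusRe.MatchString("20x")) // false
}
```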

View file

@ -4,6 +4,7 @@ import (
"crypto/tls" "crypto/tls"
"fmt" "fmt"
"net/http" "net/http"
"os"
"time" "time"
"github.com/containous/traefik/integration/try" "github.com/containous/traefik/integration/try"
@ -101,8 +102,10 @@ func (s *AcmeSuite) retrieveAcmeCertificate(c *check.C, testCase AcmeTestCase) {
OnDemand: testCase.onDemand, OnDemand: testCase.onDemand,
OnHostRule: !testCase.onDemand, OnHostRule: !testCase.onDemand,
}) })
defer os.Remove(file)
cmd, output := s.cmdTraefik(withConfigFile(file)) cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err := cmd.Start() err := cmd.Start()
c.Assert(err, checker.IsNil) c.Assert(err, checker.IsNil)
defer cmd.Process.Kill() defer cmd.Process.Kill()
@ -120,8 +123,6 @@ func (s *AcmeSuite) retrieveAcmeCertificate(c *check.C, testCase AcmeTestCase) {
_, err := client.Get("https://127.0.0.1:5001") _, err := client.Get("https://127.0.0.1:5001")
return err return err
}) })
// TODO: waiting a refactor of integration tests
s.displayTraefikLog(c, output)
c.Assert(err, checker.IsNil) c.Assert(err, checker.IsNil)
tr = &http.Transport{ tr = &http.Transport{

View file

@ -80,12 +80,13 @@ func (s *ConstraintSuite) deregisterService(name string, address string) error {
} }
func (s *ConstraintSuite) TestMatchConstraintGlobal(c *check.C) { func (s *ConstraintSuite) TestMatchConstraintGlobal(c *check.C) {
cmd, _ := s.cmdTraefik( cmd, display := s.traefikCmd(
withConfigFile("fixtures/consul_catalog/simple.toml"), withConfigFile("fixtures/consul_catalog/simple.toml"),
"--consulCatalog", "--consulCatalog",
"--consulCatalog.endpoint="+s.consulIP+":8500", "--consulCatalog.endpoint="+s.consulIP+":8500",
"--consulCatalog.domain=consul.localhost", "--consulCatalog.domain=consul.localhost",
"--constraints=tag==api") "--constraints=tag==api")
defer display(c)
err := cmd.Start() err := cmd.Start()
c.Assert(err, checker.IsNil) c.Assert(err, checker.IsNil)
defer cmd.Process.Kill() defer cmd.Process.Kill()
@ -105,12 +106,13 @@ func (s *ConstraintSuite) TestMatchConstraintGlobal(c *check.C) {
} }
func (s *ConstraintSuite) TestDoesNotMatchConstraintGlobal(c *check.C) { func (s *ConstraintSuite) TestDoesNotMatchConstraintGlobal(c *check.C) {
cmd, _ := s.cmdTraefik( cmd, display := s.traefikCmd(
withConfigFile("fixtures/consul_catalog/simple.toml"), withConfigFile("fixtures/consul_catalog/simple.toml"),
"--consulCatalog", "--consulCatalog",
"--consulCatalog.endpoint="+s.consulIP+":8500", "--consulCatalog.endpoint="+s.consulIP+":8500",
"--consulCatalog.domain=consul.localhost", "--consulCatalog.domain=consul.localhost",
"--constraints=tag==api") "--constraints=tag==api")
defer display(c)
err := cmd.Start() err := cmd.Start()
c.Assert(err, checker.IsNil) c.Assert(err, checker.IsNil)
defer cmd.Process.Kill() defer cmd.Process.Kill()
@ -130,12 +132,13 @@ func (s *ConstraintSuite) TestDoesNotMatchConstraintGlobal(c *check.C) {
} }
func (s *ConstraintSuite) TestMatchConstraintProvider(c *check.C) { func (s *ConstraintSuite) TestMatchConstraintProvider(c *check.C) {
cmd, _ := s.cmdTraefik( cmd, display := s.traefikCmd(
withConfigFile("fixtures/consul_catalog/simple.toml"), withConfigFile("fixtures/consul_catalog/simple.toml"),
"--consulCatalog", "--consulCatalog",
"--consulCatalog.endpoint="+s.consulIP+":8500", "--consulCatalog.endpoint="+s.consulIP+":8500",
"--consulCatalog.domain=consul.localhost", "--consulCatalog.domain=consul.localhost",
"--consulCatalog.constraints=tag==api") "--consulCatalog.constraints=tag==api")
defer display(c)
err := cmd.Start() err := cmd.Start()
c.Assert(err, checker.IsNil) c.Assert(err, checker.IsNil)
defer cmd.Process.Kill() defer cmd.Process.Kill()
@ -155,12 +158,13 @@ func (s *ConstraintSuite) TestMatchConstraintProvider(c *check.C) {
} }
func (s *ConstraintSuite) TestDoesNotMatchConstraintProvider(c *check.C) { func (s *ConstraintSuite) TestDoesNotMatchConstraintProvider(c *check.C) {
cmd, _ := s.cmdTraefik( cmd, display := s.traefikCmd(
withConfigFile("fixtures/consul_catalog/simple.toml"), withConfigFile("fixtures/consul_catalog/simple.toml"),
"--consulCatalog", "--consulCatalog",
"--consulCatalog.endpoint="+s.consulIP+":8500", "--consulCatalog.endpoint="+s.consulIP+":8500",
"--consulCatalog.domain=consul.localhost", "--consulCatalog.domain=consul.localhost",
"--consulCatalog.constraints=tag==api") "--consulCatalog.constraints=tag==api")
defer display(c)
err := cmd.Start() err := cmd.Start()
c.Assert(err, checker.IsNil) c.Assert(err, checker.IsNil)
defer cmd.Process.Kill() defer cmd.Process.Kill()
@ -180,13 +184,14 @@ func (s *ConstraintSuite) TestDoesNotMatchConstraintProvider(c *check.C) {
} }
func (s *ConstraintSuite) TestMatchMultipleConstraint(c *check.C) { func (s *ConstraintSuite) TestMatchMultipleConstraint(c *check.C) {
cmd, _ := s.cmdTraefik( cmd, display := s.traefikCmd(
withConfigFile("fixtures/consul_catalog/simple.toml"), withConfigFile("fixtures/consul_catalog/simple.toml"),
"--consulCatalog", "--consulCatalog",
"--consulCatalog.endpoint="+s.consulIP+":8500", "--consulCatalog.endpoint="+s.consulIP+":8500",
"--consulCatalog.domain=consul.localhost", "--consulCatalog.domain=consul.localhost",
"--consulCatalog.constraints=tag==api", "--consulCatalog.constraints=tag==api",
"--constraints=tag!=us-*") "--constraints=tag!=us-*")
defer display(c)
err := cmd.Start() err := cmd.Start()
c.Assert(err, checker.IsNil) c.Assert(err, checker.IsNil)
defer cmd.Process.Kill() defer cmd.Process.Kill()
@ -206,13 +211,14 @@ func (s *ConstraintSuite) TestMatchMultipleConstraint(c *check.C) {
} }
func (s *ConstraintSuite) TestDoesNotMatchMultipleConstraint(c *check.C) { func (s *ConstraintSuite) TestDoesNotMatchMultipleConstraint(c *check.C) {
cmd, _ := s.cmdTraefik( cmd, display := s.traefikCmd(
withConfigFile("fixtures/consul_catalog/simple.toml"), withConfigFile("fixtures/consul_catalog/simple.toml"),
"--consulCatalog", "--consulCatalog",
"--consulCatalog.endpoint="+s.consulIP+":8500", "--consulCatalog.endpoint="+s.consulIP+":8500",
"--consulCatalog.domain=consul.localhost", "--consulCatalog.domain=consul.localhost",
"--consulCatalog.constraints=tag==api", "--consulCatalog.constraints=tag==api",
"--constraints=tag!=us-*") "--constraints=tag!=us-*")
defer display(c)
err := cmd.Start() err := cmd.Start()
c.Assert(err, checker.IsNil) c.Assert(err, checker.IsNil)
defer cmd.Process.Kill() defer cmd.Process.Kill()

View file

@ -104,10 +104,11 @@ func (s *ConsulCatalogSuite) deregisterService(name string, address string) erro
} }
func (s *ConsulCatalogSuite) TestSimpleConfiguration(c *check.C) { func (s *ConsulCatalogSuite) TestSimpleConfiguration(c *check.C) {
cmd, _ := s.cmdTraefik( cmd, display := s.traefikCmd(
withConfigFile("fixtures/consul_catalog/simple.toml"), withConfigFile("fixtures/consul_catalog/simple.toml"),
"--consulCatalog", "--consulCatalog",
"--consulCatalog.endpoint="+s.consulIP+":8500") "--consulCatalog.endpoint="+s.consulIP+":8500")
defer display(c)
err := cmd.Start() err := cmd.Start()
c.Assert(err, checker.IsNil) c.Assert(err, checker.IsNil)
defer cmd.Process.Kill() defer cmd.Process.Kill()
@ -119,15 +120,20 @@ func (s *ConsulCatalogSuite) TestSimpleConfiguration(c *check.C) {
} }
func (s *ConsulCatalogSuite) TestSingleService(c *check.C) { func (s *ConsulCatalogSuite) TestSingleService(c *check.C) {
cmd, _ := s.cmdTraefik( cmd, display := s.traefikCmd(
withConfigFile("fixtures/consul_catalog/simple.toml"), withConfigFile("fixtures/consul_catalog/simple.toml"),
"--consulCatalog", "--consulCatalog",
"--consulCatalog.endpoint="+s.consulIP+":8500", "--consulCatalog.endpoint="+s.consulIP+":8500",
"--consulCatalog.domain=consul.localhost") "--consulCatalog.domain=consul.localhost")
defer display(c)
err := cmd.Start() err := cmd.Start()
c.Assert(err, checker.IsNil) c.Assert(err, checker.IsNil)
defer cmd.Process.Kill() defer cmd.Process.Kill()
// Wait for Traefik to turn ready.
err = try.GetRequest("http://127.0.0.1:8000/", 2*time.Second, try.StatusCodeIs(http.StatusNotFound))
c.Assert(err, checker.IsNil)
nginx := s.composeProject.Container(c, "nginx1") nginx := s.composeProject.Container(c, "nginx1")
err = s.registerService("test", nginx.NetworkSettings.IPAddress, 80, []string{}) err = s.registerService("test", nginx.NetworkSettings.IPAddress, 80, []string{})
@ -143,12 +149,13 @@ func (s *ConsulCatalogSuite) TestSingleService(c *check.C) {
} }
func (s *ConsulCatalogSuite) TestExposedByDefaultFalseSingleService(c *check.C) { func (s *ConsulCatalogSuite) TestExposedByDefaultFalseSingleService(c *check.C) {
cmd, _ := s.cmdTraefik( cmd, display := s.traefikCmd(
withConfigFile("fixtures/consul_catalog/simple.toml"), withConfigFile("fixtures/consul_catalog/simple.toml"),
"--consulCatalog", "--consulCatalog",
"--consulCatalog.exposedByDefault=false", "--consulCatalog.exposedByDefault=false",
"--consulCatalog.endpoint="+s.consulIP+":8500", "--consulCatalog.endpoint="+s.consulIP+":8500",
"--consulCatalog.domain=consul.localhost") "--consulCatalog.domain=consul.localhost")
defer display(c)
err := cmd.Start() err := cmd.Start()
c.Assert(err, checker.IsNil) c.Assert(err, checker.IsNil)
defer cmd.Process.Kill() defer cmd.Process.Kill()
@ -168,12 +175,13 @@ func (s *ConsulCatalogSuite) TestExposedByDefaultFalseSingleService(c *check.C)
} }
func (s *ConsulCatalogSuite) TestExposedByDefaultFalseSimpleServiceMultipleNode(c *check.C) { func (s *ConsulCatalogSuite) TestExposedByDefaultFalseSimpleServiceMultipleNode(c *check.C) {
cmd, _ := s.cmdTraefik( cmd, display := s.traefikCmd(
withConfigFile("fixtures/consul_catalog/simple.toml"), withConfigFile("fixtures/consul_catalog/simple.toml"),
"--consulCatalog", "--consulCatalog",
"--consulCatalog.exposedByDefault=false", "--consulCatalog.exposedByDefault=false",
"--consulCatalog.endpoint="+s.consulIP+":8500", "--consulCatalog.endpoint="+s.consulIP+":8500",
"--consulCatalog.domain=consul.localhost") "--consulCatalog.domain=consul.localhost")
defer display(c)
err := cmd.Start() err := cmd.Start()
c.Assert(err, checker.IsNil) c.Assert(err, checker.IsNil)
defer cmd.Process.Kill() defer cmd.Process.Kill()
@ -198,12 +206,13 @@ func (s *ConsulCatalogSuite) TestExposedByDefaultFalseSimpleServiceMultipleNode(
} }
func (s *ConsulCatalogSuite) TestExposedByDefaultTrueSimpleServiceMultipleNode(c *check.C) { func (s *ConsulCatalogSuite) TestExposedByDefaultTrueSimpleServiceMultipleNode(c *check.C) {
cmd, _ := s.cmdTraefik( cmd, display := s.traefikCmd(
withConfigFile("fixtures/consul_catalog/simple.toml"), withConfigFile("fixtures/consul_catalog/simple.toml"),
"--consulCatalog", "--consulCatalog",
"--consulCatalog.exposedByDefault=true", "--consulCatalog.exposedByDefault=true",
"--consulCatalog.endpoint="+s.consulIP+":8500", "--consulCatalog.endpoint="+s.consulIP+":8500",
"--consulCatalog.domain=consul.localhost") "--consulCatalog.domain=consul.localhost")
defer display(c)
err := cmd.Start() err := cmd.Start()
c.Assert(err, checker.IsNil) c.Assert(err, checker.IsNil)
defer cmd.Process.Kill() defer cmd.Process.Kill()
@ -232,12 +241,13 @@ func (s *ConsulCatalogSuite) TestExposedByDefaultTrueSimpleServiceMultipleNode(c
} }
func (s *ConsulCatalogSuite) TestRefreshConfigWithMultipleNodeWithoutHealthCheck(c *check.C) { func (s *ConsulCatalogSuite) TestRefreshConfigWithMultipleNodeWithoutHealthCheck(c *check.C) {
cmd, _ := s.cmdTraefik( cmd, display := s.traefikCmd(
withConfigFile("fixtures/consul_catalog/simple.toml"), withConfigFile("fixtures/consul_catalog/simple.toml"),
"--consulCatalog", "--consulCatalog",
"--consulCatalog.exposedByDefault=true", "--consulCatalog.exposedByDefault=true",
"--consulCatalog.endpoint="+s.consulIP+":8500", "--consulCatalog.endpoint="+s.consulIP+":8500",
"--consulCatalog.domain=consul.localhost") "--consulCatalog.domain=consul.localhost")
defer display(c)
err := cmd.Start() err := cmd.Start()
c.Assert(err, checker.IsNil) c.Assert(err, checker.IsNil)
defer cmd.Process.Kill() defer cmd.Process.Kill()
@ -283,20 +293,17 @@ func (s *ConsulCatalogSuite) TestRefreshConfigWithMultipleNodeWithoutHealthCheck
} }
func (s *ConsulCatalogSuite) TestBasicAuthSimpleService(c *check.C) { func (s *ConsulCatalogSuite) TestBasicAuthSimpleService(c *check.C) {
cmd, output := s.cmdTraefik( cmd, display := s.traefikCmd(
withConfigFile("fixtures/consul_catalog/simple.toml"), withConfigFile("fixtures/consul_catalog/simple.toml"),
"--consulCatalog", "--consulCatalog",
"--consulCatalog.exposedByDefault=true", "--consulCatalog.exposedByDefault=true",
"--consulCatalog.endpoint="+s.consulIP+":8500", "--consulCatalog.endpoint="+s.consulIP+":8500",
"--consulCatalog.domain=consul.localhost") "--consulCatalog.domain=consul.localhost")
defer display(c)
err := cmd.Start() err := cmd.Start()
c.Assert(err, checker.IsNil) c.Assert(err, checker.IsNil)
defer cmd.Process.Kill() defer cmd.Process.Kill()
defer func() {
s.displayTraefikLog(c, output)
}()
nginx := s.composeProject.Container(c, "nginx1") nginx := s.composeProject.Container(c, "nginx1")
err = s.registerService("test", nginx.NetworkSettings.IPAddress, 80, []string{ err = s.registerService("test", nginx.NetworkSettings.IPAddress, 80, []string{

View file

@ -95,7 +95,8 @@ func (s *ConsulSuite) TestSimpleConfiguration(c *check.C) {
file := s.adaptFile(c, "fixtures/consul/simple.toml", struct{ ConsulHost string }{consulHost}) file := s.adaptFile(c, "fixtures/consul/simple.toml", struct{ ConsulHost string }{consulHost})
defer os.Remove(file) defer os.Remove(file)
cmd, _ := s.cmdTraefik(withConfigFile(file)) cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err := cmd.Start() err := cmd.Start()
c.Assert(err, checker.IsNil) c.Assert(err, checker.IsNil)
defer cmd.Process.Kill() defer cmd.Process.Kill()
@ -111,7 +112,8 @@ func (s *ConsulSuite) TestNominalConfiguration(c *check.C) {
file := s.adaptFile(c, "fixtures/consul/simple.toml", struct{ ConsulHost string }{consulHost}) file := s.adaptFile(c, "fixtures/consul/simple.toml", struct{ ConsulHost string }{consulHost})
defer os.Remove(file) defer os.Remove(file)
cmd, _ := s.cmdTraefik(withConfigFile(file)) cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err := cmd.Start() err := cmd.Start()
c.Assert(err, checker.IsNil) c.Assert(err, checker.IsNil)
defer cmd.Process.Kill() defer cmd.Process.Kill()
@ -210,10 +212,11 @@ func (s *ConsulSuite) TestGlobalConfiguration(c *check.C) {
c.Assert(err, checker.IsNil) c.Assert(err, checker.IsNil)
// start traefik // start traefik
cmd, _ := s.cmdTraefik( cmd, display := s.traefikCmd(
withConfigFile("fixtures/simple_web.toml"), withConfigFile("fixtures/simple_web.toml"),
"--consul", "--consul",
"--consul.endpoint="+consulHost+":8500") "--consul.endpoint="+consulHost+":8500")
defer display(c)
err = cmd.Start() err = cmd.Start()
c.Assert(err, checker.IsNil) c.Assert(err, checker.IsNil)
@ -297,7 +300,7 @@ func (s *ConsulSuite) skipTestGlobalConfigurationWithClientTLS(c *check.C) {
c.Assert(err, checker.IsNil) c.Assert(err, checker.IsNil)
// start traefik // start traefik
cmd, _ := s.cmdTraefik( cmd, display := s.traefikCmd(
withConfigFile("fixtures/simple_web.toml"), withConfigFile("fixtures/simple_web.toml"),
"--consul", "--consul",
"--consul.endpoint="+consulHost+":8585", "--consul.endpoint="+consulHost+":8585",
@ -305,6 +308,7 @@ func (s *ConsulSuite) skipTestGlobalConfigurationWithClientTLS(c *check.C) {
"--consul.tls.cert=resources/tls/consul.cert", "--consul.tls.cert=resources/tls/consul.cert",
"--consul.tls.key=resources/tls/consul.key", "--consul.tls.key=resources/tls/consul.key",
"--consul.tls.insecureskipverify") "--consul.tls.insecureskipverify")
defer display(c)
err = cmd.Start() err = cmd.Start()
c.Assert(err, checker.IsNil) c.Assert(err, checker.IsNil)
@ -319,10 +323,11 @@ func (s *ConsulSuite) TestCommandStoreConfig(c *check.C) {
s.setupConsul(c) s.setupConsul(c)
consulHost := s.composeProject.Container(c, "consul").NetworkSettings.IPAddress consulHost := s.composeProject.Container(c, "consul").NetworkSettings.IPAddress
cmd, _ := s.cmdTraefik( cmd, display := s.traefikCmd(
"storeconfig", "storeconfig",
withConfigFile("fixtures/simple_web.toml"), withConfigFile("fixtures/simple_web.toml"),
"--consul.endpoint="+consulHost+":8500") "--consul.endpoint="+consulHost+":8500")
defer display(c)
err := cmd.Start() err := cmd.Start()
c.Assert(err, checker.IsNil) c.Assert(err, checker.IsNil)

View file

@ -79,7 +79,8 @@ func (s *DockerSuite) TestSimpleConfiguration(c *check.C) {
file := s.adaptFileForHost(c, "fixtures/docker/simple.toml") file := s.adaptFileForHost(c, "fixtures/docker/simple.toml")
defer os.Remove(file) defer os.Remove(file)
cmd, _ := s.cmdTraefik(withConfigFile(file)) cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err := cmd.Start() err := cmd.Start()
c.Assert(err, checker.IsNil) c.Assert(err, checker.IsNil)
defer cmd.Process.Kill() defer cmd.Process.Kill()
@ -96,7 +97,8 @@ func (s *DockerSuite) TestDefaultDockerContainers(c *check.C) {
name := s.startContainer(c, "swarm:1.0.0", "manage", "token://blablabla") name := s.startContainer(c, "swarm:1.0.0", "manage", "token://blablabla")
// Start traefik // Start traefik
cmd, _ := s.cmdTraefik(withConfigFile(file)) cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err := cmd.Start() err := cmd.Start()
c.Assert(err, checker.IsNil) c.Assert(err, checker.IsNil)
defer cmd.Process.Kill() defer cmd.Process.Kill()
@ -128,7 +130,8 @@ func (s *DockerSuite) TestDockerContainersWithLabels(c *check.C) {
s.startContainerWithLabels(c, "swarm:1.0.0", labels, "manage", "token://blabla") s.startContainerWithLabels(c, "swarm:1.0.0", labels, "manage", "token://blabla")
// Start traefik // Start traefik
cmd, _ := s.cmdTraefik(withConfigFile(file)) cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err := cmd.Start() err := cmd.Start()
c.Assert(err, checker.IsNil) c.Assert(err, checker.IsNil)
defer cmd.Process.Kill() defer cmd.Process.Kill()
@ -160,7 +163,8 @@ func (s *DockerSuite) TestDockerContainersWithOneMissingLabels(c *check.C) {
s.startContainerWithLabels(c, "swarm:1.0.0", labels, "manage", "token://blabla") s.startContainerWithLabels(c, "swarm:1.0.0", labels, "manage", "token://blabla")
// Start traefik // Start traefik
cmd, _ := s.cmdTraefik(withConfigFile(file)) cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err := cmd.Start() err := cmd.Start()
c.Assert(err, checker.IsNil) c.Assert(err, checker.IsNil)
defer cmd.Process.Kill() defer cmd.Process.Kill()

View file

@ -147,7 +147,8 @@ func (s *DynamoDBSuite) TestSimpleConfiguration(c *check.C) {
dynamoURL := "http://" + s.composeProject.Container(c, "dynamo").NetworkSettings.IPAddress + ":8000" dynamoURL := "http://" + s.composeProject.Container(c, "dynamo").NetworkSettings.IPAddress + ":8000"
file := s.adaptFile(c, "fixtures/dynamodb/simple.toml", struct{ DynamoURL string }{dynamoURL}) file := s.adaptFile(c, "fixtures/dynamodb/simple.toml", struct{ DynamoURL string }{dynamoURL})
defer os.Remove(file) defer os.Remove(file)
cmd, _ := s.cmdTraefik(withConfigFile(file)) cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err := cmd.Start() err := cmd.Start()
c.Assert(err, checker.IsNil) c.Assert(err, checker.IsNil)
defer cmd.Process.Kill() defer cmd.Process.Kill()

View file

@ -33,7 +33,8 @@ func (s *ErrorPagesSuite) TestSimpleConfiguration(c *check.C) {
}{s.BackendIP, s.ErrorPageIP}) }{s.BackendIP, s.ErrorPageIP})
defer os.Remove(file) defer os.Remove(file)
cmd, _ := s.cmdTraefik(withConfigFile(file)) cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err := cmd.Start() err := cmd.Start()
c.Assert(err, checker.IsNil) c.Assert(err, checker.IsNil)
defer cmd.Process.Kill() defer cmd.Process.Kill()
@ -55,7 +56,8 @@ func (s *ErrorPagesSuite) TestErrorPage(c *check.C) {
}{s.BackendIP, s.ErrorPageIP}) }{s.BackendIP, s.ErrorPageIP})
defer os.Remove(file) defer os.Remove(file)
cmd, _ := s.cmdTraefik(withConfigFile(file)) cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err := cmd.Start() err := cmd.Start()
c.Assert(err, checker.IsNil) c.Assert(err, checker.IsNil)
defer cmd.Process.Kill() defer cmd.Process.Kill()

View file

@ -64,7 +64,8 @@ func (s *EtcdSuite) TestSimpleConfiguration(c *check.C) {
file := s.adaptFile(c, "fixtures/etcd/simple.toml", struct{ EtcdHost string }{etcdHost}) file := s.adaptFile(c, "fixtures/etcd/simple.toml", struct{ EtcdHost string }{etcdHost})
defer os.Remove(file) defer os.Remove(file)
cmd, _ := s.cmdTraefik(withConfigFile(file)) cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err := cmd.Start() err := cmd.Start()
c.Assert(err, checker.IsNil) c.Assert(err, checker.IsNil)
defer cmd.Process.Kill() defer cmd.Process.Kill()
@ -81,7 +82,8 @@ func (s *EtcdSuite) TestNominalConfiguration(c *check.C) {
file := s.adaptFile(c, "fixtures/etcd/simple.toml", struct{ EtcdHost string }{etcdHost}) file := s.adaptFile(c, "fixtures/etcd/simple.toml", struct{ EtcdHost string }{etcdHost})
defer os.Remove(file) defer os.Remove(file)
cmd, _ := s.cmdTraefik(withConfigFile(file)) cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err := cmd.Start() err := cmd.Start()
c.Assert(err, checker.IsNil) c.Assert(err, checker.IsNil)
defer cmd.Process.Kill() defer cmd.Process.Kill()
@ -200,10 +202,11 @@ func (s *EtcdSuite) TestGlobalConfiguration(c *check.C) {
c.Assert(err, checker.IsNil) c.Assert(err, checker.IsNil)
// start traefik // start traefik
cmd, _ := s.cmdTraefik( cmd, display := s.traefikCmd(
withConfigFile("fixtures/simple_web.toml"), withConfigFile("fixtures/simple_web.toml"),
"--etcd", "--etcd",
"--etcd.endpoint="+etcdHost+":4001") "--etcd.endpoint="+etcdHost+":4001")
defer display(c)
err = cmd.Start() err = cmd.Start()
c.Assert(err, checker.IsNil) c.Assert(err, checker.IsNil)
defer cmd.Process.Kill() defer cmd.Process.Kill()
@ -279,10 +282,11 @@ func (s *EtcdSuite) TestGlobalConfiguration(c *check.C) {
func (s *EtcdSuite) TestCertificatesContentstWithSNIConfigHandshake(c *check.C) { func (s *EtcdSuite) TestCertificatesContentstWithSNIConfigHandshake(c *check.C) {
etcdHost := s.composeProject.Container(c, "etcd").NetworkSettings.IPAddress etcdHost := s.composeProject.Container(c, "etcd").NetworkSettings.IPAddress
// start traefik // start traefik
cmd, _ := s.cmdTraefik( cmd, display := s.traefikCmd(
withConfigFile("fixtures/simple_web.toml"), withConfigFile("fixtures/simple_web.toml"),
"--etcd", "--etcd",
"--etcd.endpoint="+etcdHost+":4001") "--etcd.endpoint="+etcdHost+":4001")
defer display(c)
whoami1IP := s.composeProject.Container(c, "whoami1").NetworkSettings.IPAddress whoami1IP := s.composeProject.Container(c, "whoami1").NetworkSettings.IPAddress
whoami2IP := s.composeProject.Container(c, "whoami2").NetworkSettings.IPAddress whoami2IP := s.composeProject.Container(c, "whoami2").NetworkSettings.IPAddress
@ -387,10 +391,11 @@ func (s *EtcdSuite) TestCertificatesContentstWithSNIConfigHandshake(c *check.C)
func (s *EtcdSuite) TestCommandStoreConfig(c *check.C) { func (s *EtcdSuite) TestCommandStoreConfig(c *check.C) {
etcdHost := s.composeProject.Container(c, "etcd").NetworkSettings.IPAddress etcdHost := s.composeProject.Container(c, "etcd").NetworkSettings.IPAddress
cmd, _ := s.cmdTraefik( cmd, display := s.traefikCmd(
"storeconfig", "storeconfig",
withConfigFile("fixtures/simple_web.toml"), withConfigFile("fixtures/simple_web.toml"),
"--etcd.endpoint="+etcdHost+":4001") "--etcd.endpoint="+etcdHost+":4001")
defer display(c)
err := cmd.Start() err := cmd.Start()
c.Assert(err, checker.IsNil) c.Assert(err, checker.IsNil)

View file

@ -40,7 +40,8 @@ func (s *EurekaSuite) TestSimpleConfiguration(c *check.C) {
file := s.adaptFile(c, "fixtures/eureka/simple.toml", struct{ EurekaHost string }{s.eurekaIP}) file := s.adaptFile(c, "fixtures/eureka/simple.toml", struct{ EurekaHost string }{s.eurekaIP})
defer os.Remove(file) defer os.Remove(file)
cmd, _ := s.cmdTraefik(withConfigFile(file)) cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err := cmd.Start() err := cmd.Start()
c.Assert(err, checker.IsNil) c.Assert(err, checker.IsNil)
defer cmd.Process.Kill() defer cmd.Process.Kill()

View file

@ -19,7 +19,8 @@ func (s *FileSuite) SetUpSuite(c *check.C) {
} }
func (s *FileSuite) TestSimpleConfiguration(c *check.C) { func (s *FileSuite) TestSimpleConfiguration(c *check.C) {
cmd, _ := s.cmdTraefik(withConfigFile("fixtures/file/simple.toml")) cmd, display := s.traefikCmd(withConfigFile("fixtures/file/simple.toml"))
defer display(c)
err := cmd.Start() err := cmd.Start()
c.Assert(err, checker.IsNil) c.Assert(err, checker.IsNil)
defer cmd.Process.Kill() defer cmd.Process.Kill()
@ -31,7 +32,8 @@ func (s *FileSuite) TestSimpleConfiguration(c *check.C) {
// #56 regression test, make sure it does not fail // #56 regression test, make sure it does not fail
func (s *FileSuite) TestSimpleConfigurationNoPanic(c *check.C) { func (s *FileSuite) TestSimpleConfigurationNoPanic(c *check.C) {
cmd, _ := s.cmdTraefik(withConfigFile("fixtures/file/56-simple-panic.toml")) cmd, display := s.traefikCmd(withConfigFile("fixtures/file/56-simple-panic.toml"))
defer display(c)
err := cmd.Start() err := cmd.Start()
c.Assert(err, checker.IsNil) c.Assert(err, checker.IsNil)
defer cmd.Process.Kill() defer cmd.Process.Kill()
@ -42,8 +44,8 @@ func (s *FileSuite) TestSimpleConfigurationNoPanic(c *check.C) {
} }
func (s *FileSuite) TestDirectoryConfiguration(c *check.C) { func (s *FileSuite) TestDirectoryConfiguration(c *check.C) {
cmd, _ := s.cmdTraefik(withConfigFile("fixtures/file/directory.toml")) cmd, display := s.traefikCmd(withConfigFile("fixtures/file/directory.toml"))
defer display(c)
err := cmd.Start() err := cmd.Start()
c.Assert(err, checker.IsNil) c.Assert(err, checker.IsNil)
defer cmd.Process.Kill() defer cmd.Process.Kill()

View file

@ -0,0 +1,26 @@
defaultEntryPoints = ["http"]
logLevel = "DEBUG"
[entryPoints]
[entryPoints.http]
address = ":8000"
[web]
address = ":8080"
[file]
[backends]
[backends.backend1]
[backends.backend1.healthcheck]
path = "/health"
port = 80
interval = "1s"
[backends.backend1.servers.server1]
url = "http://{{.Server1}}:81"
[frontends]
[frontends.frontend1]
backend = "backend1"
[frontends.frontend1.routes.test_1]
rule = "Host:test.localhost"

View file

@ -24,7 +24,7 @@ type GRPCSuite struct{ BaseSuite }
type myserver struct{} type myserver struct{}
func (suite *GRPCSuite) SetUpSuite(c *check.C) { func (s *GRPCSuite) SetUpSuite(c *check.C) {
var err error var err error
LocalhostCert, err = ioutil.ReadFile("./resources/tls/local.cert") LocalhostCert, err = ioutil.ReadFile("./resources/tls/local.cert")
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
@ -72,7 +72,7 @@ func callHelloClientGRPC() (string, error) {
return r.Message, nil return r.Message, nil
} }
func (suite *GRPCSuite) TestGRPC(c *check.C) { func (s *GRPCSuite) TestGRPC(c *check.C) {
lis, err := net.Listen("tcp", ":0") lis, err := net.Listen("tcp", ":0")
_, port, err := net.SplitHostPort(lis.Addr().String()) _, port, err := net.SplitHostPort(lis.Addr().String())
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
@ -83,7 +83,7 @@ func (suite *GRPCSuite) TestGRPC(c *check.C) {
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
}() }()
file := suite.adaptFile(c, "fixtures/grpc/config.toml", struct { file := s.adaptFile(c, "fixtures/grpc/config.toml", struct {
CertContent string CertContent string
KeyContent string KeyContent string
GRPCServerPort string GRPCServerPort string
@ -94,7 +94,8 @@ func (suite *GRPCSuite) TestGRPC(c *check.C) {
}) })
defer os.Remove(file) defer os.Remove(file)
cmd, _ := suite.cmdTraefik(withConfigFile(file)) cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err = cmd.Start() err = cmd.Start()
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)

View file

@ -34,7 +34,8 @@ func (s *HealthCheckSuite) TestSimpleConfiguration(c *check.C) {
}{s.whoami1IP, s.whoami2IP}) }{s.whoami1IP, s.whoami2IP})
defer os.Remove(file) defer os.Remove(file)
cmd, _ := s.cmdTraefik(withConfigFile(file)) cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err := cmd.Start() err := cmd.Start()
c.Assert(err, checker.IsNil) c.Assert(err, checker.IsNil)
defer cmd.Process.Kill() defer cmd.Process.Kill()
@ -112,7 +113,8 @@ func (s *HealthCheckSuite) doTestMultipleEntrypoints(c *check.C, fixture string)
}{s.whoami1IP, s.whoami2IP}) }{s.whoami1IP, s.whoami2IP})
defer os.Remove(file) defer os.Remove(file)
cmd, _ := s.cmdTraefik(withConfigFile(file)) cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err := cmd.Start() err := cmd.Start()
c.Assert(err, checker.IsNil) c.Assert(err, checker.IsNil)
defer cmd.Process.Kill() defer cmd.Process.Kill()
@ -163,3 +165,50 @@ func (s *HealthCheckSuite) doTestMultipleEntrypoints(c *check.C, fixture string)
err = try.Request(frontend1Req, 2*time.Second, try.BodyContains(s.whoami1IP)) err = try.Request(frontend1Req, 2*time.Second, try.BodyContains(s.whoami1IP))
c.Assert(err, checker.Not(checker.IsNil)) c.Assert(err, checker.Not(checker.IsNil))
} }
func (s *HealthCheckSuite) TestPortOverload(c *check.C) {
// Set one whoami health to 200
client := &http.Client{}
statusInternalServerErrorReq, err := http.NewRequest(http.MethodPost, "http://"+s.whoami1IP+"/health", bytes.NewBuffer([]byte("200")))
c.Assert(err, checker.IsNil)
_, err = client.Do(statusInternalServerErrorReq)
c.Assert(err, checker.IsNil)
file := s.adaptFile(c, "fixtures/healthcheck/port_overload.toml", struct {
Server1 string
}{s.whoami1IP})
defer os.Remove(file)
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err = cmd.Start()
c.Assert(err, checker.IsNil)
defer cmd.Process.Kill()
// wait for traefik
err = try.GetRequest("http://127.0.0.1:8080/api/providers", 10*time.Second, try.BodyContains("Host:test.localhost"))
c.Assert(err, checker.IsNil)
frontendHealthReq, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/health", nil)
c.Assert(err, checker.IsNil)
frontendHealthReq.Host = "test.localhost"
// We expect a Bad Gateway response because the backend server is configured with an invalid port
err = try.Request(frontendHealthReq, 500*time.Millisecond, try.StatusCodeIs(http.StatusBadGateway))
c.Assert(err, checker.IsNil)
// Set one whoami health to 500
statusInternalServerErrorReq, err = http.NewRequest(http.MethodPost, "http://"+s.whoami1IP+"/health", bytes.NewBuffer([]byte("500")))
c.Assert(err, checker.IsNil)
_, err = client.Do(statusInternalServerErrorReq)
c.Assert(err, checker.IsNil)
// Waiting for Traefik healthcheck
try.Sleep(2 * time.Second)
// Verify no backend service is available due to failing health checks
err = try.Request(frontendHealthReq, 3*time.Second, try.StatusCodeIs(http.StatusServiceUnavailable))
c.Assert(err, checker.IsNil)
}

View file

@ -20,7 +20,8 @@ type HTTPSSuite struct{ BaseSuite }
// "snitest.com", which happens to match the CN of 'snitest.com.crt'. The test // "snitest.com", which happens to match the CN of 'snitest.com.crt'. The test
// verifies that traefik presents the correct certificate. // verifies that traefik presents the correct certificate.
func (s *HTTPSSuite) TestWithSNIConfigHandshake(c *check.C) { func (s *HTTPSSuite) TestWithSNIConfigHandshake(c *check.C) {
cmd, _ := s.cmdTraefik(withConfigFile("fixtures/https/https_sni.toml")) cmd, display := s.traefikCmd(withConfigFile("fixtures/https/https_sni.toml"))
defer display(c)
err := cmd.Start() err := cmd.Start()
c.Assert(err, checker.IsNil) c.Assert(err, checker.IsNil)
defer cmd.Process.Kill() defer cmd.Process.Kill()
@ -53,7 +54,8 @@ func (s *HTTPSSuite) TestWithSNIConfigHandshake(c *check.C) {
// SNI hostnames of "snitest.org" and "snitest.com". The test verifies // SNI hostnames of "snitest.org" and "snitest.com". The test verifies
// that traefik routes the requests to the expected backends. // that traefik routes the requests to the expected backends.
func (s *HTTPSSuite) TestWithSNIConfigRoute(c *check.C) { func (s *HTTPSSuite) TestWithSNIConfigRoute(c *check.C) {
cmd, _ := s.cmdTraefik(withConfigFile("fixtures/https/https_sni.toml")) cmd, display := s.traefikCmd(withConfigFile("fixtures/https/https_sni.toml"))
defer display(c)
err := cmd.Start() err := cmd.Start()
c.Assert(err, checker.IsNil) c.Assert(err, checker.IsNil)
defer cmd.Process.Kill() defer cmd.Process.Kill()
@ -111,7 +113,8 @@ func (s *HTTPSSuite) TestWithSNIConfigRoute(c *check.C) {
// TestWithClientCertificateAuthentication // TestWithClientCertificateAuthentication
// The client has to send a certificate signed by a CA trusted by the server // The client has to send a certificate signed by a CA trusted by the server
func (s *HTTPSSuite) TestWithClientCertificateAuthentication(c *check.C) { func (s *HTTPSSuite) TestWithClientCertificateAuthentication(c *check.C) {
cmd, _ := s.cmdTraefik(withConfigFile("fixtures/https/clientca/https_1ca1config.toml")) cmd, display := s.traefikCmd(withConfigFile("fixtures/https/clientca/https_1ca1config.toml"))
defer display(c)
err := cmd.Start() err := cmd.Start()
c.Assert(err, checker.IsNil) c.Assert(err, checker.IsNil)
defer cmd.Process.Kill() defer cmd.Process.Kill()
@ -157,7 +160,8 @@ func (s *HTTPSSuite) TestWithClientCertificateAuthentication(c *check.C) {
// TestWithClientCertificateAuthentication // TestWithClientCertificateAuthentication
// Use two CAs and test that clients with certificates signed by either of them can connect // Use two CAs and test that clients with certificates signed by either of them can connect
func (s *HTTPSSuite) TestWithClientCertificateAuthenticationMultipeCAs(c *check.C) { func (s *HTTPSSuite) TestWithClientCertificateAuthenticationMultipeCAs(c *check.C) {
cmd, _ := s.cmdTraefik(withConfigFile("fixtures/https/clientca/https_2ca1config.toml")) cmd, display := s.traefikCmd(withConfigFile("fixtures/https/clientca/https_2ca1config.toml"))
defer display(c)
err := cmd.Start() err := cmd.Start()
c.Assert(err, checker.IsNil) c.Assert(err, checker.IsNil)
defer cmd.Process.Kill() defer cmd.Process.Kill()
@ -217,7 +221,8 @@ func (s *HTTPSSuite) TestWithClientCertificateAuthenticationMultipeCAs(c *check.
// TestWithClientCertificateAuthentication // TestWithClientCertificateAuthentication
// Use two CAs in two different files and test that clients with certificates signed by either of them can connect // Use two CAs in two different files and test that clients with certificates signed by either of them can connect
func (s *HTTPSSuite) TestWithClientCertificateAuthenticationMultipeCAsMultipleFiles(c *check.C) { func (s *HTTPSSuite) TestWithClientCertificateAuthenticationMultipeCAsMultipleFiles(c *check.C) {
cmd, _ := s.cmdTraefik(withConfigFile("fixtures/https/clientca/https_2ca2config.toml")) cmd, display := s.traefikCmd(withConfigFile("fixtures/https/clientca/https_2ca2config.toml"))
defer display(c)
err := cmd.Start() err := cmd.Start()
c.Assert(err, checker.IsNil) c.Assert(err, checker.IsNil)
defer cmd.Process.Kill() defer cmd.Process.Kill()
@ -281,7 +286,8 @@ func (s *HTTPSSuite) TestWithRootCAsContentForHTTPSOnBackend(c *check.C) {
file := s.adaptFile(c, "fixtures/https/rootcas/https.toml", struct{ BackendHost string }{backend.URL}) file := s.adaptFile(c, "fixtures/https/rootcas/https.toml", struct{ BackendHost string }{backend.URL})
defer os.Remove(file) defer os.Remove(file)
cmd, _ := s.cmdTraefik(withConfigFile(file)) cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err := cmd.Start() err := cmd.Start()
c.Assert(err, checker.IsNil) c.Assert(err, checker.IsNil)
defer cmd.Process.Kill() defer cmd.Process.Kill()
@ -302,7 +308,8 @@ func (s *HTTPSSuite) TestWithRootCAsFileForHTTPSOnBackend(c *check.C) {
file := s.adaptFile(c, "fixtures/https/rootcas/https_with_file.toml", struct{ BackendHost string }{backend.URL}) file := s.adaptFile(c, "fixtures/https/rootcas/https_with_file.toml", struct{ BackendHost string }{backend.URL})
defer os.Remove(file) defer os.Remove(file)
cmd, _ := s.cmdTraefik(withConfigFile(file)) cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err := cmd.Start() err := cmd.Start()
c.Assert(err, checker.IsNil) c.Assert(err, checker.IsNil)
defer cmd.Process.Kill() defer cmd.Process.Kill()

View file

@ -12,6 +12,7 @@ import (
"testing" "testing"
"text/template" "text/template"
"github.com/containous/traefik/log"
"github.com/go-check/check" "github.com/go-check/check"
compose "github.com/libkermit/compose/check" compose "github.com/libkermit/compose/check"
checker "github.com/vdemeester/shakers" checker "github.com/vdemeester/shakers"
@ -88,12 +89,21 @@ func (s *BaseSuite) cmdTraefik(args ...string) (*exec.Cmd, *bytes.Buffer) {
return cmd, &out return cmd, &out
} }
func (s *BaseSuite) traefikCmd(args ...string) (*exec.Cmd, func(*check.C)) {
cmd, out := s.cmdTraefik(args...)
return cmd, func(c *check.C) {
if c.Failed() {
s.displayTraefikLog(c, out)
}
}
}
func (s *BaseSuite) displayTraefikLog(c *check.C, output *bytes.Buffer) { func (s *BaseSuite) displayTraefikLog(c *check.C, output *bytes.Buffer) {
if output == nil || output.Len() == 0 { if output == nil || output.Len() == 0 {
fmt.Printf("%s: No Traefik logs present.", c.TestName()) log.Printf("%s: No Traefik logs.", c.TestName())
} else { } else {
fmt.Printf("%s: Traefik logs: ", c.TestName()) log.Printf("%s: Traefik logs: ", c.TestName())
fmt.Println(output.String()) log.Println(output.String())
} }
} }
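
The new `traefikCmd` helper replaces the manual `showTraefikLog` bookkeeping removed from the Marathon suite below: the returned closure checks `c.Failed()` and dumps the captured Traefik output only when the test has failed. A typical call site looks like the sketch below; it assumes a suite embedding `BaseSuite`, and the fixture path, URLs, and the `integration/try` import path are illustrative.

```go
package integration

import (
	"net/http"
	"time"

	"github.com/containous/traefik/integration/try"
	"github.com/go-check/check"
	checker "github.com/vdemeester/shakers"
)

// ExampleSuite is a stand-in for any of the suites above that embed BaseSuite.
type ExampleSuite struct{ BaseSuite }

func (s *ExampleSuite) TestSimpleConfiguration(c *check.C) {
	// traefikCmd captures stdout/stderr; display(c) prints the captured
	// Traefik log only if the test has failed by the time it runs.
	cmd, display := s.traefikCmd(withConfigFile("fixtures/file/simple.toml"))
	defer display(c)

	err := cmd.Start()
	c.Assert(err, checker.IsNil)
	defer cmd.Process.Kill()

	// Wait for Traefik to come up before asserting anything else.
	err = try.GetRequest("http://127.0.0.1:8080/api/providers", 10*time.Second, try.StatusCodeIs(http.StatusOK))
	c.Assert(err, checker.IsNil)
}
```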

View file

@ -20,7 +20,8 @@ type LogRotationSuite struct{ BaseSuite }
func (s *LogRotationSuite) TestAccessLogRotation(c *check.C) { func (s *LogRotationSuite) TestAccessLogRotation(c *check.C) {
// Start Traefik // Start Traefik
cmd, _ := s.cmdTraefik(withConfigFile("fixtures/access_log_config.toml")) cmd, display := s.traefikCmd(withConfigFile("fixtures/access_log_config.toml"))
defer display(c)
err := cmd.Start() err := cmd.Start()
c.Assert(err, checker.IsNil) c.Assert(err, checker.IsNil)
defer cmd.Process.Kill() defer cmd.Process.Kill()
@ -68,7 +69,8 @@ func (s *LogRotationSuite) TestAccessLogRotation(c *check.C) {
func (s *LogRotationSuite) TestTraefikLogRotation(c *check.C) { func (s *LogRotationSuite) TestTraefikLogRotation(c *check.C) {
// Start Traefik // Start Traefik
cmd, _ := s.cmdTraefik(withConfigFile("fixtures/traefik_log_config.toml")) cmd, display := s.traefikCmd(withConfigFile("fixtures/traefik_log_config.toml"))
defer display(c)
err := cmd.Start() err := cmd.Start()
c.Assert(err, checker.IsNil) c.Assert(err, checker.IsNil)
defer cmd.Process.Kill() defer cmd.Process.Kill()

View file

@ -88,7 +88,8 @@ func (s *MarathonSuite) TestConfigurationUpdate(c *check.C) {
MarathonURL string MarathonURL string
}{s.marathonURL}) }{s.marathonURL})
defer os.Remove(file) defer os.Remove(file)
cmd, output := s.cmdTraefik(withConfigFile(file)) cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err := cmd.Start() err := cmd.Start()
c.Assert(err, checker.IsNil) c.Assert(err, checker.IsNil)
defer cmd.Process.Kill() defer cmd.Process.Kill()
@ -103,16 +104,6 @@ func (s *MarathonSuite) TestConfigurationUpdate(c *check.C) {
client, err := marathon.NewClient(config) client, err := marathon.NewClient(config)
c.Assert(err, checker.IsNil) c.Assert(err, checker.IsNil)
// Show the Traefik log if any assertion fails. If the entire test runs
// to a successful completion, we flip the flag at the very end and don't
// display anything.
showTraefikLog := true
defer func() {
if showTraefikLog {
s.displayTraefikLog(c, output)
}
}()
// Create test application to be deployed. // Create test application to be deployed.
app := marathon.NewDockerApplication(). app := marathon.NewDockerApplication().
Name("/whoami"). Name("/whoami").
@ -146,6 +137,4 @@ func (s *MarathonSuite) TestConfigurationUpdate(c *check.C) {
// Query application via Traefik. // Query application via Traefik.
err = try.GetRequest("http://127.0.0.1:8000/app", 30*time.Second, try.StatusCodeIs(http.StatusOK)) err = try.GetRequest("http://127.0.0.1:8000/app", 30*time.Second, try.StatusCodeIs(http.StatusOK))
c.Assert(err, checker.IsNil) c.Assert(err, checker.IsNil)
showTraefikLog = false
} }

View file

@ -17,7 +17,8 @@ func (s *MesosSuite) SetUpSuite(c *check.C) {
} }
func (s *MesosSuite) TestSimpleConfiguration(c *check.C) { func (s *MesosSuite) TestSimpleConfiguration(c *check.C) {
cmd, _ := s.cmdTraefik(withConfigFile("fixtures/mesos/simple.toml")) cmd, display := s.traefikCmd(withConfigFile("fixtures/mesos/simple.toml"))
defer display(c)
err := cmd.Start() err := cmd.Start()
c.Assert(err, checker.IsNil) c.Assert(err, checker.IsNil)
defer cmd.Process.Kill() defer cmd.Process.Kill()

View file

@ -24,7 +24,8 @@ func (s *TimeoutSuite) TestForwardingTimeouts(c *check.C) {
}{httpTimeoutEndpoint}) }{httpTimeoutEndpoint})
defer os.Remove(file) defer os.Remove(file)
cmd, _ := s.cmdTraefik(withConfigFile(file)) cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err := cmd.Start() err := cmd.Start()
c.Assert(err, checker.IsNil) c.Assert(err, checker.IsNil)
defer cmd.Process.Kill() defer cmd.Process.Kill()

View file

@ -20,7 +20,7 @@ import (
// WebsocketSuite // WebsocketSuite
type WebsocketSuite struct{ BaseSuite } type WebsocketSuite struct{ BaseSuite }
func (suite *WebsocketSuite) TestBase(c *check.C) { func (s *WebsocketSuite) TestBase(c *check.C) {
var upgrader = gorillawebsocket.Upgrader{} // use default options var upgrader = gorillawebsocket.Upgrader{} // use default options
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@ -41,14 +41,15 @@ func (suite *WebsocketSuite) TestBase(c *check.C) {
} }
})) }))
file := suite.adaptFile(c, "fixtures/websocket/config.toml", struct { file := s.adaptFile(c, "fixtures/websocket/config.toml", struct {
WebsocketServer string WebsocketServer string
}{ }{
WebsocketServer: srv.URL, WebsocketServer: srv.URL,
}) })
defer os.Remove(file) defer os.Remove(file)
cmd, _ := suite.cmdTraefik(withConfigFile(file), "--debug") cmd, display := s.traefikCmd(withConfigFile(file), "--debug")
defer display(c)
err := cmd.Start() err := cmd.Start()
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
@ -69,7 +70,7 @@ func (suite *WebsocketSuite) TestBase(c *check.C) {
c.Assert(string(msg), checker.Equals, "OK") c.Assert(string(msg), checker.Equals, "OK")
} }
func (suite *WebsocketSuite) TestWrongOrigin(c *check.C) { func (s *WebsocketSuite) TestWrongOrigin(c *check.C) {
var upgrader = gorillawebsocket.Upgrader{} // use default options var upgrader = gorillawebsocket.Upgrader{} // use default options
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@ -90,14 +91,15 @@ func (suite *WebsocketSuite) TestWrongOrigin(c *check.C) {
} }
})) }))
file := suite.adaptFile(c, "fixtures/websocket/config.toml", struct { file := s.adaptFile(c, "fixtures/websocket/config.toml", struct {
WebsocketServer string WebsocketServer string
}{ }{
WebsocketServer: srv.URL, WebsocketServer: srv.URL,
}) })
defer os.Remove(file) defer os.Remove(file)
cmd, _ := suite.cmdTraefik(withConfigFile(file), "--debug") cmd, display := s.traefikCmd(withConfigFile(file), "--debug")
defer display(c)
err := cmd.Start() err := cmd.Start()
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
@ -117,7 +119,7 @@ func (suite *WebsocketSuite) TestWrongOrigin(c *check.C) {
c.Assert(err, checker.ErrorMatches, "bad status") c.Assert(err, checker.ErrorMatches, "bad status")
} }
func (suite *WebsocketSuite) TestOrigin(c *check.C) { func (s *WebsocketSuite) TestOrigin(c *check.C) {
// use default options // use default options
var upgrader = gorillawebsocket.Upgrader{} var upgrader = gorillawebsocket.Upgrader{}
@ -139,14 +141,15 @@ func (suite *WebsocketSuite) TestOrigin(c *check.C) {
} }
})) }))
file := suite.adaptFile(c, "fixtures/websocket/config.toml", struct { file := s.adaptFile(c, "fixtures/websocket/config.toml", struct {
WebsocketServer string WebsocketServer string
}{ }{
WebsocketServer: srv.URL, WebsocketServer: srv.URL,
}) })
defer os.Remove(file) defer os.Remove(file)
cmd, _ := suite.cmdTraefik(withConfigFile(file), "--debug") cmd, display := s.traefikCmd(withConfigFile(file), "--debug")
defer display(c)
err := cmd.Start() err := cmd.Start()
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
@ -176,7 +179,7 @@ func (suite *WebsocketSuite) TestOrigin(c *check.C) {
} }
func (suite *WebsocketSuite) TestWrongOriginIgnoredByServer(c *check.C) { func (s *WebsocketSuite) TestWrongOriginIgnoredByServer(c *check.C) {
var upgrader = gorillawebsocket.Upgrader{CheckOrigin: func(r *http.Request) bool { var upgrader = gorillawebsocket.Upgrader{CheckOrigin: func(r *http.Request) bool {
return true return true
}} }}
@ -199,14 +202,15 @@ func (suite *WebsocketSuite) TestWrongOriginIgnoredByServer(c *check.C) {
} }
})) }))
file := suite.adaptFile(c, "fixtures/websocket/config.toml", struct { file := s.adaptFile(c, "fixtures/websocket/config.toml", struct {
WebsocketServer string WebsocketServer string
}{ }{
WebsocketServer: srv.URL, WebsocketServer: srv.URL,
}) })
defer os.Remove(file) defer os.Remove(file)
cmd, _ := suite.cmdTraefik(withConfigFile(file), "--debug") cmd, display := s.traefikCmd(withConfigFile(file), "--debug")
defer display(c)
err := cmd.Start() err := cmd.Start()
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
@ -236,7 +240,7 @@ func (suite *WebsocketSuite) TestWrongOriginIgnoredByServer(c *check.C) {
} }
func (suite *WebsocketSuite) TestSSLTermination(c *check.C) { func (s *WebsocketSuite) TestSSLTermination(c *check.C) {
var upgrader = gorillawebsocket.Upgrader{} // use default options var upgrader = gorillawebsocket.Upgrader{} // use default options
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@ -256,14 +260,15 @@ func (suite *WebsocketSuite) TestSSLTermination(c *check.C) {
} }
} }
})) }))
file := suite.adaptFile(c, "fixtures/websocket/config_https.toml", struct { file := s.adaptFile(c, "fixtures/websocket/config_https.toml", struct {
WebsocketServer string WebsocketServer string
}{ }{
WebsocketServer: srv.URL, WebsocketServer: srv.URL,
}) })
defer os.Remove(file) defer os.Remove(file)
cmd, _ := suite.cmdTraefik(withConfigFile(file), "--debug") cmd, display := s.traefikCmd(withConfigFile(file), "--debug")
defer display(c)
err := cmd.Start() err := cmd.Start()
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)

View file

@ -227,8 +227,10 @@ func RotateFile() error {
return nil return nil
} }
if err := CloseFile(); err != nil { if logFile != nil {
return fmt.Errorf("error closing log file: %s", err) defer func(f *os.File) {
f.Close()
}(logFile)
} }
if err := OpenFile(logFilePath); err != nil { if err := OpenFile(logFilePath); err != nil {
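
The rewritten `RotateFile` above avoids the window in which the old handle was closed before a new one existed: the previous `*os.File` is only closed, via the deferred closure, after `OpenFile` has installed its replacement, so no log writes are lost during rotation. Here is a reduced, self-contained sketch of the same pattern; `openFile` and `rotateFile` are stand-ins, not the actual log package API.

```go
package main

import (
	"fmt"
	"os"
)

var logFile *os.File

// openFile points the writer at path, mirroring log.OpenFile in spirit.
func openFile(path string) error {
	f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664)
	if err != nil {
		return err
	}
	logFile = f
	return nil
}

// rotateFile re-opens the log file without a window where writes are lost:
// the old handle is only closed after the new one is in place.
func rotateFile(path string) error {
	if logFile != nil {
		// Capture the current handle; close it once the swap is done.
		defer func(f *os.File) {
			f.Close()
		}(logFile)
	}
	if err := openFile(path); err != nil {
		return fmt.Errorf("error opening log file: %s", err)
	}
	return nil
}

func main() {
	if err := openFile("/tmp/example.log"); err != nil {
		panic(err)
	}
	fmt.Fprintln(logFile, "before rotation")
	if err := rotateFile("/tmp/example.log"); err != nil {
		panic(err)
	}
	fmt.Fprintln(logFile, "after rotation")
}
```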

79  log/logger_test.go  Normal file
View file

@ -0,0 +1,79 @@
package log
import (
"io/ioutil"
"os"
"strings"
"testing"
"time"
)
func TestLogRotation(t *testing.T) {
tempDir, err := ioutil.TempDir("", "traefik_")
if err != nil {
t.Fatalf("Error setting up temporary directory: %s", err)
}
fileName := tempDir + "traefik.log"
if err := OpenFile(fileName); err != nil {
t.Fatalf("Error opening temporary file %s: %s", fileName, err)
}
defer CloseFile()
rotatedFileName := fileName + ".rotated"
iterations := 20
halfDone := make(chan bool)
writeDone := make(chan bool)
go func() {
for i := 0; i < iterations; i++ {
Println("Test log line")
if i == iterations/2 {
halfDone <- true
}
}
writeDone <- true
}()
<-halfDone
err = os.Rename(fileName, rotatedFileName)
if err != nil {
t.Fatalf("Error renaming file: %s", err)
}
err = RotateFile()
if err != nil {
t.Fatalf("Error rotating file: %s", err)
}
select {
case <-writeDone:
gotLineCount := lineCount(t, fileName) + lineCount(t, rotatedFileName)
if iterations != gotLineCount {
t.Errorf("Wanted %d written log lines, got %d", iterations, gotLineCount)
}
case <-time.After(500 * time.Millisecond):
t.Fatalf("test timed out")
}
close(halfDone)
close(writeDone)
}
func lineCount(t *testing.T, fileName string) int {
t.Helper()
fileContents, err := ioutil.ReadFile(fileName)
if err != nil {
t.Fatalf("Error reading from file %s: %s", fileName, err)
}
count := 0
for _, line := range strings.Split(string(fileContents), "\n") {
if strings.TrimSpace(line) == "" {
continue
}
count++
}
return count
}

View file

@ -8,6 +8,7 @@ import (
"net/url" "net/url"
"os" "os"
"path/filepath" "path/filepath"
"sync"
"sync/atomic" "sync/atomic"
"time" "time"
@ -34,6 +35,7 @@ type LogHandler struct {
logger *logrus.Logger logger *logrus.Logger
file *os.File file *os.File
filePath string filePath string
mu sync.Mutex
} }
// NewLogHandler creates a new LogHandler // NewLogHandler creates a new LogHandler
@ -148,14 +150,19 @@ func (l *LogHandler) Close() error {
// by an external source. // by an external source.
func (l *LogHandler) Rotate() error { func (l *LogHandler) Rotate() error {
var err error var err error
if err = l.Close(); err != nil {
return err if l.file != nil {
defer func(f *os.File) {
f.Close()
}(l.file)
} }
l.file, err = os.OpenFile(l.filePath, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664) l.file, err = os.OpenFile(l.filePath, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664)
if err != nil { if err != nil {
return err return err
} }
l.mu.Lock()
defer l.mu.Unlock()
l.logger.Out = l.file l.logger.Out = l.file
return nil return nil
} }
@ -226,6 +233,8 @@ func (l *LogHandler) logTheRoundTrip(logDataTable *LogData, crr *captureRequestR
fields["downstream_"+k] = logDataTable.DownstreamResponse.Get(k) fields["downstream_"+k] = logDataTable.DownstreamResponse.Get(k)
} }
l.mu.Lock()
defer l.mu.Unlock()
l.logger.WithFields(fields).Println() l.logger.WithFields(fields).Println()
} }
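
The access-log handler applies the same close-after-reopen idea and additionally serializes `Rotate` against concurrent request logging with the new mutex: the swap of `l.logger.Out` and each `WithFields(...).Println()` call now run under the same lock. Below is a standard-library sketch of that concurrency pattern, standing in for the logrus-backed `LogHandler`.

```go
package main

import (
	"fmt"
	"os"
	"sync"
)

// rotatingWriter shows the concurrency pattern used by LogHandler above:
// a mutex serializes log writes against Rotate swapping the output file.
type rotatingWriter struct {
	mu   sync.Mutex
	file *os.File
	path string
}

func newRotatingWriter(path string) (*rotatingWriter, error) {
	f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664)
	if err != nil {
		return nil, err
	}
	return &rotatingWriter{file: f, path: path}, nil
}

// Log writes one line while holding the mutex, so it never races with Rotate.
func (w *rotatingWriter) Log(line string) {
	w.mu.Lock()
	defer w.mu.Unlock()
	fmt.Fprintln(w.file, line)
}

// Rotate opens the new file first and only then closes the old handle,
// swapping them under the same mutex the writers use.
func (w *rotatingWriter) Rotate() error {
	f, err := os.OpenFile(w.path, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664)
	if err != nil {
		return err
	}
	w.mu.Lock()
	defer w.mu.Unlock()
	old := w.file
	w.file = f
	if old != nil {
		old.Close()
	}
	return nil
}

func main() {
	w, err := newRotatingWriter("/tmp/access.log")
	if err != nil {
		panic(err)
	}
	w.Log("before rotation")
	if err := w.Rotate(); err != nil {
		panic(err)
	}
	w.Log("after rotation")
}
```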

View file

@ -10,7 +10,9 @@ import (
"os" "os"
"path/filepath" "path/filepath"
"regexp" "regexp"
"strings"
"testing" "testing"
"time"
"github.com/containous/traefik/types" "github.com/containous/traefik/types"
shellwords "github.com/mattn/go-shellwords" shellwords "github.com/mattn/go-shellwords"
@ -36,6 +38,84 @@ var (
testRetryAttempts = 2 testRetryAttempts = 2
) )
func TestLogRotation(t *testing.T) {
tempDir, err := ioutil.TempDir("", "traefik_")
if err != nil {
t.Fatalf("Error setting up temporary directory: %s", err)
}
fileName := tempDir + "traefik.log"
rotatedFileName := fileName + ".rotated"
config := &types.AccessLog{FilePath: fileName, Format: CommonFormat}
logHandler, err := NewLogHandler(config)
if err != nil {
t.Fatalf("Error creating new log handler: %s", err)
}
defer logHandler.Close()
recorder := httptest.NewRecorder()
req := httptest.NewRequest(http.MethodGet, "http://localhost", nil)
next := func(rw http.ResponseWriter, req *http.Request) {
rw.WriteHeader(http.StatusOK)
}
iterations := 20
halfDone := make(chan bool)
writeDone := make(chan bool)
go func() {
for i := 0; i < iterations; i++ {
logHandler.ServeHTTP(recorder, req, next)
if i == iterations/2 {
halfDone <- true
}
}
writeDone <- true
}()
<-halfDone
err = os.Rename(fileName, rotatedFileName)
if err != nil {
t.Fatalf("Error renaming file: %s", err)
}
err = logHandler.Rotate()
if err != nil {
t.Fatalf("Error rotating file: %s", err)
}
select {
case <-writeDone:
gotLineCount := lineCount(t, fileName) + lineCount(t, rotatedFileName)
if iterations != gotLineCount {
t.Errorf("Wanted %d written log lines, got %d", iterations, gotLineCount)
}
case <-time.After(500 * time.Millisecond):
t.Fatalf("test timed out")
}
close(halfDone)
close(writeDone)
}
func lineCount(t *testing.T, fileName string) int {
t.Helper()
fileContents, err := ioutil.ReadFile(fileName)
if err != nil {
t.Fatalf("Error reading from file %s: %s", fileName, err)
}
count := 0
for _, line := range strings.Split(string(fileContents), "\n") {
if strings.TrimSpace(line) == "" {
continue
}
count++
}
return count
}
func TestLoggerCLF(t *testing.T) { func TestLoggerCLF(t *testing.T) {
tmpDir := createTempDir(t, CommonFormat) tmpDir := createTempDir(t, CommonFormat)
defer os.RemoveAll(tmpDir) defer os.RemoveAll(tmpDir)

View file

@ -1,4 +1,4 @@
package middlewares package auth
import ( import (
"fmt" "fmt"
@ -7,7 +7,6 @@ import (
"strings" "strings"
goauth "github.com/abbot/go-http-auth" goauth "github.com/abbot/go-http-auth"
"github.com/containous/traefik/auth"
"github.com/containous/traefik/log" "github.com/containous/traefik/log"
"github.com/containous/traefik/types" "github.com/containous/traefik/types"
"github.com/urfave/negroni" "github.com/urfave/negroni"
@ -64,52 +63,12 @@ func NewAuthenticator(authConfig *types.Auth) (*Authenticator, error) {
}) })
} else if authConfig.Forward != nil { } else if authConfig.Forward != nil {
authenticator.handler = negroni.HandlerFunc(func(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) { authenticator.handler = negroni.HandlerFunc(func(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {
auth.Forward(authConfig.Forward, w, r, next) Forward(authConfig.Forward, w, r, next)
}) })
} }
return &authenticator, nil return &authenticator, nil
} }
func parserBasicUsers(basic *types.Basic) (map[string]string, error) {
var userStrs []string
if basic.UsersFile != "" {
var err error
if userStrs, err = getLinesFromFile(basic.UsersFile); err != nil {
return nil, err
}
}
userStrs = append(basic.Users, userStrs...)
userMap := make(map[string]string)
for _, user := range userStrs {
split := strings.Split(user, ":")
if len(split) != 2 {
return nil, fmt.Errorf("Error parsing Authenticator user: %v", user)
}
userMap[split[0]] = split[1]
}
return userMap, nil
}
func parserDigestUsers(digest *types.Digest) (map[string]string, error) {
var userStrs []string
if digest.UsersFile != "" {
var err error
if userStrs, err = getLinesFromFile(digest.UsersFile); err != nil {
return nil, err
}
}
userStrs = append(digest.Users, userStrs...)
userMap := make(map[string]string)
for _, user := range userStrs {
split := strings.Split(user, ":")
if len(split) != 3 {
return nil, fmt.Errorf("Error parsing Authenticator user: %v", user)
}
userMap[split[0]+":"+split[1]] = split[2]
}
return userMap, nil
}
func getLinesFromFile(filename string) ([]string, error) { func getLinesFromFile(filename string) ([]string, error) {
dat, err := ioutil.ReadFile(filename) dat, err := ioutil.ReadFile(filename)
if err != nil { if err != nil {

View file

@ -1,4 +1,4 @@
package middlewares package auth
import ( import (
"fmt" "fmt"
@ -157,7 +157,7 @@ func TestDigestAuthFail(t *testing.T) {
} }
func TestBasicAuthUserHeader(t *testing.T) { func TestBasicAuthUserHeader(t *testing.T) {
authMiddleware, err := NewAuthenticator(&types.Auth{ middleware, err := NewAuthenticator(&types.Auth{
Basic: &types.Basic{ Basic: &types.Basic{
Users: []string{"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/"}, Users: []string{"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/"},
}, },
@ -169,7 +169,7 @@ func TestBasicAuthUserHeader(t *testing.T) {
assert.Equal(t, "test", r.Header["X-Webauth-User"][0], "auth user should be set") assert.Equal(t, "test", r.Header["X-Webauth-User"][0], "auth user should be set")
fmt.Fprintln(w, "traefik") fmt.Fprintln(w, "traefik")
}) })
n := negroni.New(authMiddleware) n := negroni.New(middleware)
n.UseHandler(handler) n.UseHandler(handler)
ts := httptest.NewServer(n) ts := httptest.NewServer(n)
defer ts.Close() defer ts.Close()
@ -186,67 +186,3 @@ func TestBasicAuthUserHeader(t *testing.T) {
assert.NoError(t, err, "there should be no error") assert.NoError(t, err, "there should be no error")
assert.Equal(t, "traefik\n", string(body), "they should be equal") assert.Equal(t, "traefik\n", string(body), "they should be equal")
} }
func TestForwardAuthFail(t *testing.T) {
authTs := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
http.Error(w, "Forbidden", http.StatusForbidden)
}))
defer authTs.Close()
authMiddleware, err := NewAuthenticator(&types.Auth{
Forward: &types.Forward{
Address: authTs.URL,
},
})
assert.NoError(t, err, "there should be no error")
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, "traefik")
})
n := negroni.New(authMiddleware)
n.UseHandler(handler)
ts := httptest.NewServer(n)
defer ts.Close()
client := &http.Client{}
req := testhelpers.MustNewRequest(http.MethodGet, ts.URL, nil)
res, err := client.Do(req)
assert.NoError(t, err, "there should be no error")
assert.Equal(t, http.StatusForbidden, res.StatusCode, "they should be equal")
body, err := ioutil.ReadAll(res.Body)
assert.NoError(t, err, "there should be no error")
assert.Equal(t, "Forbidden\n", string(body), "they should be equal")
}
func TestForwardAuthSuccess(t *testing.T) {
authTs := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, "Success")
}))
defer authTs.Close()
authMiddleware, err := NewAuthenticator(&types.Auth{
Forward: &types.Forward{
Address: authTs.URL,
},
})
assert.NoError(t, err, "there should be no error")
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, "traefik")
})
n := negroni.New(authMiddleware)
n.UseHandler(handler)
ts := httptest.NewServer(n)
defer ts.Close()
client := &http.Client{}
req := testhelpers.MustNewRequest(http.MethodGet, ts.URL, nil)
res, err := client.Do(req)
assert.NoError(t, err, "there should be no error")
assert.Equal(t, http.StatusOK, res.StatusCode, "they should be equal")
body, err := ioutil.ReadAll(res.Body)
assert.NoError(t, err, "there should be no error")
assert.Equal(t, "traefik\n", string(body), "they should be equal")
}

View file

@ -0,0 +1,97 @@
package auth
import (
"io/ioutil"
"net"
"net/http"
"strings"
"github.com/containous/traefik/log"
"github.com/containous/traefik/types"
"github.com/vulcand/oxy/forward"
"github.com/vulcand/oxy/utils"
)
// Forward the authentication to an external server
func Forward(config *types.Forward, w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {
httpClient := http.Client{}
if config.TLS != nil {
tlsConfig, err := config.TLS.CreateTLSConfig()
if err != nil {
log.Debugf("Impossible to configure TLS to call %s. Cause %s", config.Address, err)
w.WriteHeader(http.StatusInternalServerError)
return
}
httpClient.Transport = &http.Transport{
TLSClientConfig: tlsConfig,
}
}
forwardReq, err := http.NewRequest(http.MethodGet, config.Address, nil)
if err != nil {
log.Debugf("Error calling %s. Cause %s", config.Address, err)
w.WriteHeader(http.StatusInternalServerError)
return
}
writeHeader(r, forwardReq, config.TrustForwardHeader)
forwardResponse, forwardErr := httpClient.Do(forwardReq)
if forwardErr != nil {
log.Debugf("Error calling %s. Cause: %s", config.Address, forwardErr)
w.WriteHeader(http.StatusInternalServerError)
return
}
body, readError := ioutil.ReadAll(forwardResponse.Body)
if readError != nil {
log.Debugf("Error reading body %s. Cause: %s", config.Address, readError)
w.WriteHeader(http.StatusInternalServerError)
return
}
defer forwardResponse.Body.Close()
if forwardResponse.StatusCode < http.StatusOK || forwardResponse.StatusCode >= http.StatusMultipleChoices {
log.Debugf("Remote error %s. StatusCode: %d", config.Address, forwardResponse.StatusCode)
w.WriteHeader(forwardResponse.StatusCode)
w.Write(body)
return
}
r.RequestURI = r.URL.RequestURI()
next(w, r)
}
func writeHeader(req *http.Request, forwardReq *http.Request, trustForwardHeader bool) {
utils.CopyHeaders(forwardReq.Header, req.Header)
if clientIP, _, err := net.SplitHostPort(req.RemoteAddr); err == nil {
if trustForwardHeader {
if prior, ok := req.Header[forward.XForwardedFor]; ok {
clientIP = strings.Join(prior, ", ") + ", " + clientIP
}
}
forwardReq.Header.Set(forward.XForwardedFor, clientIP)
}
if xfp := req.Header.Get(forward.XForwardedProto); xfp != "" && trustForwardHeader {
forwardReq.Header.Set(forward.XForwardedProto, xfp)
} else if req.TLS != nil {
forwardReq.Header.Set(forward.XForwardedProto, "https")
} else {
forwardReq.Header.Set(forward.XForwardedProto, "http")
}
if xfp := req.Header.Get(forward.XForwardedPort); xfp != "" && trustForwardHeader {
forwardReq.Header.Set(forward.XForwardedPort, xfp)
}
if xfh := req.Header.Get(forward.XForwardedHost); xfh != "" && trustForwardHeader {
forwardReq.Header.Set(forward.XForwardedHost, xfh)
} else if req.Host != "" {
forwardReq.Header.Set(forward.XForwardedHost, req.Host)
} else {
forwardReq.Header.Del(forward.XForwardedHost)
}
}
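
`Forward` above issues a GET to the configured address with the client headers copied in by `writeHeader`, relays any non-2xx response (status and body) back to the client, and lets a 2xx response pass the original request on to the backend. The sketch below is a minimal external authentication service that such an address could point at; the header name, expected value, and listen port are purely illustrative.

```go
package main

import (
	"fmt"
	"log"
	"net/http"
)

// A minimal external authentication service. Traefik's forward auth (above)
// calls it for every request; a non-2xx status is relayed to the client,
// while a 2xx response lets the request continue to the backend.
func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		// The header name and expected value are illustrative only.
		if r.Header.Get("X-Api-Key") != "secret" {
			http.Error(w, "Forbidden", http.StatusForbidden)
			return
		}
		// The X-Forwarded-* headers set by writeHeader are visible here.
		log.Printf("authorized request for %s from %s",
			r.Header.Get("X-Forwarded-Host"), r.Header.Get("X-Forwarded-For"))
		fmt.Fprintln(w, "Success")
	})

	log.Fatal(http.ListenAndServe(":9000", nil))
}
```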

View file

@ -0,0 +1,162 @@
package auth
import (
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
"github.com/containous/traefik/testhelpers"
"github.com/containous/traefik/types"
"github.com/stretchr/testify/assert"
"github.com/urfave/negroni"
)
func TestForwardAuthFail(t *testing.T) {
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
http.Error(w, "Forbidden", http.StatusForbidden)
}))
defer server.Close()
middleware, err := NewAuthenticator(&types.Auth{
Forward: &types.Forward{
Address: server.URL,
},
})
assert.NoError(t, err, "there should be no error")
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, "traefik")
})
n := negroni.New(middleware)
n.UseHandler(handler)
ts := httptest.NewServer(n)
defer ts.Close()
client := &http.Client{}
req := testhelpers.MustNewRequest(http.MethodGet, ts.URL, nil)
res, err := client.Do(req)
assert.NoError(t, err, "there should be no error")
assert.Equal(t, http.StatusForbidden, res.StatusCode, "they should be equal")
body, err := ioutil.ReadAll(res.Body)
assert.NoError(t, err, "there should be no error")
assert.Equal(t, "Forbidden\n", string(body), "they should be equal")
}
func TestForwardAuthSuccess(t *testing.T) {
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, "Success")
}))
defer server.Close()
middleware, err := NewAuthenticator(&types.Auth{
Forward: &types.Forward{
Address: server.URL,
},
})
assert.NoError(t, err, "there should be no error")
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, "traefik")
})
n := negroni.New(middleware)
n.UseHandler(handler)
ts := httptest.NewServer(n)
defer ts.Close()
client := &http.Client{}
req := testhelpers.MustNewRequest(http.MethodGet, ts.URL, nil)
res, err := client.Do(req)
assert.NoError(t, err, "there should be no error")
assert.Equal(t, http.StatusOK, res.StatusCode, "they should be equal")
body, err := ioutil.ReadAll(res.Body)
assert.NoError(t, err, "there should be no error")
assert.Equal(t, "traefik\n", string(body), "they should be equal")
}
func Test_writeHeader(t *testing.T) {
testCases := []struct {
name string
headers map[string]string
trustForwardHeader bool
emptyHost bool
expectedHeaders map[string]string
}{
{
name: "trust Forward Header",
headers: map[string]string{
"Accept": "application/json",
"X-Forwarded-Host": "fii.bir",
},
trustForwardHeader: true,
expectedHeaders: map[string]string{
"Accept": "application/json",
"X-Forwarded-Host": "fii.bir",
},
},
{
name: "not trust Forward Header",
headers: map[string]string{
"Accept": "application/json",
"X-Forwarded-Host": "fii.bir",
},
trustForwardHeader: false,
expectedHeaders: map[string]string{
"Accept": "application/json",
"X-Forwarded-Host": "foo.bar",
},
},
{
name: "trust Forward Header with empty Host",
headers: map[string]string{
"Accept": "application/json",
"X-Forwarded-Host": "fii.bir",
},
trustForwardHeader: true,
emptyHost: true,
expectedHeaders: map[string]string{
"Accept": "application/json",
"X-Forwarded-Host": "fii.bir",
},
},
{
name: "not trust Forward Header with empty Host",
headers: map[string]string{
"Accept": "application/json",
"X-Forwarded-Host": "fii.bir",
},
trustForwardHeader: false,
emptyHost: true,
expectedHeaders: map[string]string{
"Accept": "application/json",
"X-Forwarded-Host": "",
},
},
}
for _, test := range testCases {
t.Run(test.name, func(t *testing.T) {
req := testhelpers.MustNewRequest(http.MethodGet, "http://foo.bar", nil)
for key, value := range test.headers {
req.Header.Set(key, value)
}
if test.emptyHost {
req.Host = ""
}
forwardReq := testhelpers.MustNewRequest(http.MethodGet, "http://foo.bar", nil)
writeHeader(req, forwardReq, test.trustForwardHeader)
for key, value := range test.expectedHeaders {
assert.Equal(t, value, forwardReq.Header.Get(key))
}
})
}
}

View file

@ -0,0 +1,48 @@
package auth
import (
"fmt"
"strings"
"github.com/containous/traefik/types"
)
func parserBasicUsers(basic *types.Basic) (map[string]string, error) {
var userStrs []string
if basic.UsersFile != "" {
var err error
if userStrs, err = getLinesFromFile(basic.UsersFile); err != nil {
return nil, err
}
}
userStrs = append(basic.Users, userStrs...)
userMap := make(map[string]string)
for _, user := range userStrs {
split := strings.Split(user, ":")
if len(split) != 2 {
return nil, fmt.Errorf("Error parsing Authenticator user: %v", user)
}
userMap[split[0]] = split[1]
}
return userMap, nil
}
func parserDigestUsers(digest *types.Digest) (map[string]string, error) {
var userStrs []string
if digest.UsersFile != "" {
var err error
if userStrs, err = getLinesFromFile(digest.UsersFile); err != nil {
return nil, err
}
}
userStrs = append(digest.Users, userStrs...)
userMap := make(map[string]string)
for _, user := range userStrs {
split := strings.Split(user, ":")
if len(split) != 3 {
return nil, fmt.Errorf("Error parsing Authenticator user: %v", user)
}
userMap[split[0]+":"+split[1]] = split[2]
}
return userMap, nil
}
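
`parserBasicUsers` and `parserDigestUsers` accept htpasswd-style entries: two colon-separated fields (`name:hash`) for basic auth, and three (`name:realm:hash`) for digest auth, keyed by `name:realm`. A small stand-alone sketch of those entry formats follows; the basic entry is the one used in the middleware tests above, while the digest realm and hash values are illustrative.

```go
package main

import (
	"fmt"
	"strings"
)

// splitUser restates the validation done by the parsers above: basic auth
// entries have 2 fields, digest entries have 3.
func splitUser(entry string, wantFields int) ([]string, error) {
	split := strings.Split(entry, ":")
	if len(split) != wantFields {
		return nil, fmt.Errorf("error parsing Authenticator user: %v", entry)
	}
	return split, nil
}

func main() {
	// Basic entry taken from the middleware tests above.
	basic, err := splitUser("test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/", 2)
	if err != nil {
		panic(err)
	}
	fmt.Printf("basic user %q -> hash %q\n", basic[0], basic[1])

	// Digest entry format; the realm and hash values here are illustrative.
	digest, err := splitUser("test:traefik:a2688e031edb4be6a3797f3882655c05", 3)
	if err != nil {
		panic(err)
	}
	fmt.Printf("digest key %q -> hash %q\n", digest[0]+":"+digest[1], digest[2])
}
```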

View file

@ -18,7 +18,6 @@ func (c *Compress) ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.
func gzipHandler(h http.Handler) http.Handler { func gzipHandler(h http.Handler) http.Handler {
wrapper, err := gziphandler.GzipHandlerWithOpts( wrapper, err := gziphandler.GzipHandlerWithOpts(
&gziphandler.GzipResponseWriterWrapper{},
gziphandler.CompressionLevel(gzip.DefaultCompression), gziphandler.CompressionLevel(gzip.DefaultCompression),
gziphandler.MinSize(gziphandler.DefaultMinSize)) gziphandler.MinSize(gziphandler.DefaultMinSize))
if err != nil { if err != nil {

View file

@ -6,18 +6,22 @@ site_url: https://docs.traefik.io
repo_name: 'GitHub' repo_name: 'GitHub'
repo_url: 'https://github.com/containous/traefik' repo_url: 'https://github.com/containous/traefik'
# Documentation and theme # Documentation
docs_dir: 'docs' docs_dir: 'docs'
#theme: united #theme: united
# theme: readthedocs #theme: readthedocs
theme: 'material' theme: 'material'
# theme: bootstrap #theme: bootstrap
site_favicon: 'img/traefik.icon.png' site_favicon: 'img/traefik.icon.png'
# Copyright
copyright: "Copyright &copy; 2016-2017 Containous SAS" copyright: "Copyright &copy; 2016-2017 Containous SAS"
google_analytics:
- 'UA-51880359-3'
- 'docs.traefik.io'
# Options # Options
extra: extra:
logo: img/traefik.logo.png logo: img/traefik.logo.png
@ -46,8 +50,8 @@ extra:
theme_dir: docs/theme/ theme_dir: docs/theme/
extra_css: extra_css:
- theme/styles/extra.css - theme/styles/extra.css
- theme/styles/atom-one-light.css - theme/styles/atom-one-light.css
extra_javascript: extra_javascript:
- theme/js/hljs/highlight.pack.js - theme/js/hljs/highlight.pack.js
@ -82,11 +86,12 @@ pages:
- 'Backend: Zookeeper': 'configuration/backends/zookeeper.md' - 'Backend: Zookeeper': 'configuration/backends/zookeeper.md'
- User Guides: - User Guides:
- 'Configuration Examples': 'user-guide/examples.md' - 'Configuration Examples': 'user-guide/examples.md'
- 'Swarm Cluster': 'user-guide/swarm.md'
- 'Swarm Mode Cluster': 'user-guide/swarm-mode.md' - 'Swarm Mode Cluster': 'user-guide/swarm-mode.md'
- 'Swarm Cluster': 'user-guide/swarm.md'
- 'Let''s Encrypt & Docker': 'user-guide/docker-and-lets-encrypt.md'
- 'Kubernetes': 'user-guide/kubernetes.md' - 'Kubernetes': 'user-guide/kubernetes.md'
- 'Marathon': 'user-guide/marathon.md' - 'Marathon': 'user-guide/marathon.md'
- 'Key-value Store Configuration': 'user-guide/kv-config.md' - 'Key-value Store Configuration': 'user-guide/kv-config.md'
- 'Clustering/HA': 'user-guide/cluster.md' - 'Clustering/HA': 'user-guide/cluster.md'
- 'Let''s Encrypt & Docker': 'user-guide/docker-and-lets-encrypt.md' - 'gRPC Example': 'user-guide/grpc.md'
- Benchmarks: benchmarks.md - Benchmarks: benchmarks.md

View file

@ -10,10 +10,14 @@ import (
"github.com/containous/traefik/log" "github.com/containous/traefik/log"
"github.com/containous/traefik/safe" "github.com/containous/traefik/safe"
"github.com/containous/traefik/types" "github.com/containous/traefik/types"
"github.com/mitchellh/mapstructure"
rancher "github.com/rancher/go-rancher/client" rancher "github.com/rancher/go-rancher/client"
) )
const labelRancheStackServiceName = "io.rancher.stack_service.name" const (
labelRancherStackServiceName = "io.rancher.stack_service.name"
hostNetwork = "host"
)
var withoutPagination *rancher.ListOpts var withoutPagination *rancher.ListOpts
@ -221,9 +225,24 @@ func parseAPISourcedRancherData(environments []*rancher.Environment, services []
} }
for _, container := range containers { for _, container := range containers {
if container.Labels[labelRancheStackServiceName] == rancherData.Name && if container.Labels[labelRancherStackServiceName] == rancherData.Name &&
containerFilter(container.Name, container.HealthState, container.State) { containerFilter(container.Name, container.HealthState, container.State) {
rancherData.Containers = append(rancherData.Containers, container.PrimaryIpAddress)
if container.NetworkMode == hostNetwork {
var endpoints []*rancher.PublicEndpoint
err := mapstructure.Decode(service.PublicEndpoints, &endpoints)
if err != nil {
log.Errorf("Failed to decode PublicEndpoint: %v", err)
continue
}
if len(endpoints) > 0 {
rancherData.Containers = append(rancherData.Containers, endpoints[0].IpAddress)
}
} else {
rancherData.Containers = append(rancherData.Containers, container.PrimaryIpAddress)
}
} }
} }
rancherDataList = append(rancherDataList, rancherData) rancherDataList = append(rancherDataList, rancherData)
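
The hunk above changes how the Rancher API provider picks a container address: containers running with `NetworkMode` "host" fall back to the IP of the service's first public endpoint instead of the container's primary IP. Below is a condensed, stand-alone restatement of that selection logic, with reduced stand-ins for the go-rancher types.

```go
package main

import "fmt"

// Reduced stand-ins for the go-rancher types used in the hunk above.
type publicEndpoint struct{ IPAddress string }

type container struct {
	NetworkMode      string
	PrimaryIPAddress string
}

// containerAddress restates the selection logic added above: for containers
// on the host network, the service's first public endpoint is preferred over
// the container's primary IP.
func containerAddress(c container, endpoints []publicEndpoint) (string, bool) {
	if c.NetworkMode == "host" {
		if len(endpoints) == 0 {
			return "", false
		}
		return endpoints[0].IPAddress, true
	}
	return c.PrimaryIPAddress, true
}

func main() {
	bridged := container{NetworkMode: "bridge", PrimaryIPAddress: "10.42.0.5"}
	hostNet := container{NetworkMode: "host"}

	addr, _ := containerAddress(bridged, nil)
	fmt.Println("bridged container:", addr)

	addr, _ = containerAddress(hostNet, []publicEndpoint{{IPAddress: "192.168.1.10"}})
	fmt.Println("host-network container:", addr)
}
```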

View file

@ -38,13 +38,13 @@ func (p *Provider) metadataProvide(configurationChan chan<- types.ConfigMessage,
updateConfiguration := func(version string) { updateConfiguration := func(version string) {
log.WithField("metadata_version", version).Debugln("Refreshing configuration from Rancher metadata service") log.WithField("metadata_version", version).Debugln("Refreshing configuration from Rancher metadata service")
services, err := client.GetServices() stacks, err := client.GetStacks()
if err != nil { if err != nil {
log.Errorf("Failed to query Rancher metadata service: %s", err) log.Errorf("Failed to query Rancher metadata service: %s", err)
return return
} }
rancherData := parseMetadataSourcedRancherData(services) rancherData := parseMetadataSourcedRancherData(stacks)
configuration := p.loadRancherConfig(rancherData) configuration := p.loadRancherConfig(rancherData)
configurationChan <- types.ConfigMessage{ configurationChan <- types.ConfigMessage{
ProviderName: "rancher", ProviderName: "rancher",
@ -118,21 +118,23 @@ func (p *Provider) longPoll(client rancher.Client, updateConfiguration func(stri
<-stop <-stop
} }
func parseMetadataSourcedRancherData(services []rancher.Service) (rancherDataList []rancherData) { func parseMetadataSourcedRancherData(stacks []rancher.Stack) (rancherDataList []rancherData) {
for _, service := range services { for _, stack := range stacks {
var containerIPAddresses []string for _, service := range stack.Services {
for _, container := range service.Containers { var containerIPAddresses []string
if containerFilter(container.Name, container.HealthState, container.State) { for _, container := range service.Containers {
containerIPAddresses = append(containerIPAddresses, container.PrimaryIp) if containerFilter(container.Name, container.HealthState, container.State) {
containerIPAddresses = append(containerIPAddresses, container.PrimaryIp)
}
} }
}
rancherDataList = append(rancherDataList, rancherData{ rancherDataList = append(rancherDataList, rancherData{
Name: service.Name, Name: stack.Name + "/" + service.Name,
State: service.State, State: service.State,
Labels: service.Labels, Labels: service.Labels,
Containers: containerIPAddresses, Containers: containerIPAddresses,
}) })
}
} }
return rancherDataList return rancherDataList
} }

View file

@ -12,6 +12,7 @@ import (
"github.com/containous/traefik/autogen" "github.com/containous/traefik/autogen"
"github.com/containous/traefik/log" "github.com/containous/traefik/log"
"github.com/containous/traefik/middlewares" "github.com/containous/traefik/middlewares"
mauth "github.com/containous/traefik/middlewares/auth"
"github.com/containous/traefik/safe" "github.com/containous/traefik/safe"
"github.com/containous/traefik/types" "github.com/containous/traefik/types"
"github.com/containous/traefik/version" "github.com/containous/traefik/version"
@ -135,7 +136,7 @@ func (provider *Provider) Provide(configurationChan chan<- types.ConfigMessage,
var err error var err error
var negroniInstance = negroni.New() var negroniInstance = negroni.New()
if provider.Auth != nil { if provider.Auth != nil {
authMiddleware, err := middlewares.NewAuthenticator(provider.Auth) authMiddleware, err := mauth.NewAuthenticator(provider.Auth)
if err != nil { if err != nil {
log.Fatal("Error creating Auth: ", err) log.Fatal("Error creating Auth: ", err)
} }

View file

@ -27,6 +27,7 @@ import (
"github.com/containous/traefik/metrics" "github.com/containous/traefik/metrics"
"github.com/containous/traefik/middlewares" "github.com/containous/traefik/middlewares"
"github.com/containous/traefik/middlewares/accesslog" "github.com/containous/traefik/middlewares/accesslog"
mauth "github.com/containous/traefik/middlewares/auth"
"github.com/containous/traefik/provider" "github.com/containous/traefik/provider"
"github.com/containous/traefik/safe" "github.com/containous/traefik/safe"
"github.com/containous/traefik/types" "github.com/containous/traefik/types"
@ -283,7 +284,7 @@ func (server *Server) setupServerEntryPoint(newServerEntryPointName string, newS
} }
} }
if server.globalConfiguration.EntryPoints[newServerEntryPointName].Auth != nil { if server.globalConfiguration.EntryPoints[newServerEntryPointName].Auth != nil {
authMiddleware, err := middlewares.NewAuthenticator(server.globalConfiguration.EntryPoints[newServerEntryPointName].Auth) authMiddleware, err := mauth.NewAuthenticator(server.globalConfiguration.EntryPoints[newServerEntryPointName].Auth)
if err != nil { if err != nil {
log.Fatal("Error starting server: ", err) log.Fatal("Error starting server: ", err)
} }
@ -945,7 +946,7 @@ func (server *Server) loadConfig(configurations types.Configurations, globalConf
auth.Basic = &types.Basic{ auth.Basic = &types.Basic{
Users: users, Users: users,
} }
authMiddleware, err := middlewares.NewAuthenticator(auth) authMiddleware, err := mauth.NewAuthenticator(auth)
if err != nil { if err != nil {
log.Errorf("Error creating Auth: %s", err) log.Errorf("Error creating Auth: %s", err)
} else { } else {
@ -1122,6 +1123,7 @@ func parseHealthCheckOptions(lb healthcheck.LoadBalancer, backend string, hc *ty
return &healthcheck.Options{ return &healthcheck.Options{
Path: hc.Path, Path: hc.Path,
Port: hc.Port,
Interval: interval, Interval: interval,
LB: lb, LB: lb,
} }

View file

@ -27,7 +27,7 @@ func (server *Server) listenSignals() {
} }
if err := log.RotateFile(); err != nil { if err := log.RotateFile(); err != nil {
log.Errorf("Error rotating error log: %s", err) log.Errorf("Error rotating traefik log: %s", err)
} }
default: default:
log.Infof("I have to go... %+v", sig) log.Infof("I have to go... %+v", sig)

View file

@ -1,16 +1,15 @@
package types package types
import ( import (
"crypto/tls"
"crypto/x509"
"encoding" "encoding"
"errors" "errors"
"fmt" "fmt"
"strconv"
"strings"
"crypto/tls"
"crypto/x509"
"io/ioutil" "io/ioutil"
"os" "os"
"strconv"
"strings"
"github.com/containous/flaeg" "github.com/containous/flaeg"
"github.com/containous/traefik/log" "github.com/containous/traefik/log"
@ -47,6 +46,7 @@ type CircuitBreaker struct {
// HealthCheck holds HealthCheck configuration // HealthCheck holds HealthCheck configuration
type HealthCheck struct { type HealthCheck struct {
Path string `json:"path,omitempty"` Path string `json:"path,omitempty"`
Port int `json:"port,omitempty"`
Interval string `json:"interval,omitempty"` Interval string `json:"interval,omitempty"`
} }
@ -341,8 +341,9 @@ type Digest struct {
// Forward authentication // Forward authentication
type Forward struct { type Forward struct {
Address string `description:"Authentication server address"` Address string `description:"Authentication server address"`
TLS *ClientTLS `description:"Enable TLS support"` TLS *ClientTLS `description:"Enable TLS support"`
TrustForwardHeader bool `description:"Trust X-Forwarded-* headers"`
} }
// CanonicalDomain returns a lower case domain with trim space // CanonicalDomain returns a lower case domain with trim space

201  vendor/github.com/NYTimes/gziphandler/LICENSE  generated vendored Normal file
View file

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2016-2017 The New York Times Company
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View file

@ -1,13 +0,0 @@
Copyright (c) 2015 The New York Times Company
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this library except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View file

@ -3,7 +3,6 @@ package gziphandler
import ( import (
"bufio" "bufio"
"compress/gzip" "compress/gzip"
"errors"
"fmt" "fmt"
"io" "io"
"net" "net"
@ -106,7 +105,7 @@ func (w *GzipResponseWriter) Write(b []byte) (int, error) {
// If the global writes are bigger than the minSize and we're about to write // If the global writes are bigger than the minSize and we're about to write
// a response containing a content type we want to handle, enable // a response containing a content type we want to handle, enable
// compression. // compression.
if len(w.buf) >= w.minSize && handleContentType(w.contentTypes, w) { if len(w.buf) >= w.minSize && handleContentType(w.contentTypes, w) && w.Header().Get(contentEncoding) == "" {
err := w.startGzip() err := w.startGzip()
if err != nil { if err != nil {
return 0, err return 0, err
@ -135,7 +134,7 @@ func (w *GzipResponseWriter) startGzip() error {
// Initialize the GZIP response. // Initialize the GZIP response.
w.init() w.init()
// Flush the buffer into the gzip reponse. // Flush the buffer into the gzip response.
n, err := w.gw.Write(w.buf) n, err := w.gw.Write(w.buf)
// This should never happen (per io.Writer docs), but if the write didn't // This should never happen (per io.Writer docs), but if the write didn't
@ -235,14 +234,10 @@ func NewGzipLevelHandler(level int) (func(http.Handler) http.Handler, error) {
// NewGzipLevelAndMinSize behave as NewGzipLevelHandler except it let the caller // NewGzipLevelAndMinSize behave as NewGzipLevelHandler except it let the caller
// specify the minimum size before compression. // specify the minimum size before compression.
func NewGzipLevelAndMinSize(level, minSize int) (func(http.Handler) http.Handler, error) { func NewGzipLevelAndMinSize(level, minSize int) (func(http.Handler) http.Handler, error) {
return GzipHandlerWithOpts(&GzipResponseWriter{}, CompressionLevel(level), MinSize(minSize)) return GzipHandlerWithOpts(CompressionLevel(level), MinSize(minSize))
} }
func GzipHandlerWithOpts(gw GzipWriter, opts ...option) (func(http.Handler) http.Handler, error) { func GzipHandlerWithOpts(opts ...option) (func(http.Handler) http.Handler, error) {
if gw == nil {
return nil, errors.New("the GzipWriter must be defined")
}
c := &config{ c := &config{
level: gzip.DefaultCompression, level: gzip.DefaultCompression,
minSize: DefaultMinSize, minSize: DefaultMinSize,
@ -263,10 +258,12 @@ func GzipHandlerWithOpts(gw GzipWriter, opts ...option) (func(http.Handler) http
w.Header().Add(vary, acceptEncoding) w.Header().Add(vary, acceptEncoding)
if acceptsGzip(r) { if acceptsGzip(r) {
gw.SetResponseWriter(w) gw := &GzipResponseWriter{
gw.setIndex(index) ResponseWriter: w,
gw.setMinSize(c.minSize) index: index,
gw.setContentTypes(c.contentTypes) minSize: c.minSize,
contentTypes: c.contentTypes,
}
defer gw.Close() defer gw.Close()
h.ServeHTTP(gw, r) h.ServeHTTP(gw, r)
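Two things change here: GzipHandlerWithOpts no longer takes a GzipWriter and builds the GzipResponseWriter itself, and Write now skips compression when the wrapped handler has already set Content-Encoding. A hedged usage sketch, assuming the vendored copy is importable as github.com/NYTimes/gziphandler; the handler, payload, and port are illustrative:

```go
package main

import (
	"log"
	"net/http"

	"github.com/NYTimes/gziphandler"
)

// alreadyGzipped stands in for a payload compressed ahead of time.
var alreadyGzipped = []byte{0x1f, 0x8b, 0x08, 0x00}

func main() {
	// Hypothetical handler serving bytes it has already gzip-encoded;
	// because it sets Content-Encoding, the middleware now leaves it alone.
	precompressed := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Encoding", "gzip")
		w.Write(alreadyGzipped)
	})

	// The simplified constructor: options only, no GzipWriter argument.
	wrap, err := gziphandler.GzipHandlerWithOpts(gziphandler.MinSize(512))
	if err != nil {
		log.Fatal(err)
	}
	log.Fatal(http.ListenAndServe(":8080", wrap(precompressed)))
}
```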

View file

@ -1,66 +0,0 @@
package gziphandler
import (
"bufio"
"net"
"net/http"
)
const (
contentEncodingHeader = "Content-Encoding"
)
// ----------
// http.ResponseWriter
// http.Hijacker
type GzipWriter interface {
Header() http.Header
Write([]byte) (int, error)
WriteHeader(int)
Hijack() (net.Conn, *bufio.ReadWriter, error)
Close() error
SetResponseWriter(http.ResponseWriter)
setIndex(int)
setMinSize(int)
setContentTypes([]string)
}
func (w *GzipResponseWriter) SetResponseWriter(rw http.ResponseWriter) {
w.ResponseWriter = rw
}
func (w *GzipResponseWriter) setIndex(index int) {
w.index = index
}
func (w *GzipResponseWriter) setMinSize(minSize int) {
w.minSize = minSize
}
func (w *GzipResponseWriter) setContentTypes(contentTypes []string) {
w.contentTypes = contentTypes
}
// --------
type GzipResponseWriterWrapper struct {
GzipResponseWriter
}
func (g *GzipResponseWriterWrapper) Write(b []byte) (int, error) {
if g.gw == nil && isEncoded(g.Header()) {
if g.code != 0 {
g.ResponseWriter.WriteHeader(g.code)
}
return g.ResponseWriter.Write(b)
}
return g.GzipResponseWriter.Write(b)
}
func isEncoded(headers http.Header) bool {
header := headers.Get(contentEncodingHeader)
// According to https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding,
// content is not encoded if the header 'Content-Encoding' is empty or equals to 'identity'.
return header != "" && header != "identity"
}

View file

@ -156,7 +156,7 @@ func (td *tempDir) newPath() string {
} }
} }
result := filepath.Join(td.path, strconv.Itoa(td.counter)) result := filepath.Join(td.path, strconv.Itoa(td.counter))
td.counter += 1 td.counter++
return result return result
} }
@ -274,7 +274,7 @@ func (c *C) logString(issue string) {
func (c *C) logCaller(skip int) { func (c *C) logCaller(skip int) {
// This is a bit heavier than it ought to be. // This is a bit heavier than it ought to be.
skip += 1 // Our own frame. skip++ // Our own frame.
pc, callerFile, callerLine, ok := runtime.Caller(skip) pc, callerFile, callerLine, ok := runtime.Caller(skip)
if !ok { if !ok {
return return
@ -284,7 +284,7 @@ func (c *C) logCaller(skip int) {
testFunc := runtime.FuncForPC(c.method.PC()) testFunc := runtime.FuncForPC(c.method.PC())
if runtime.FuncForPC(pc) != testFunc { if runtime.FuncForPC(pc) != testFunc {
for { for {
skip += 1 skip++
if pc, file, line, ok := runtime.Caller(skip); ok { if pc, file, line, ok := runtime.Caller(skip); ok {
// Note that the test line may be different on // Note that the test line may be different on
// distinct calls for the same test. Showing // distinct calls for the same test. Showing
@ -460,10 +460,10 @@ func (tracker *resultTracker) _loopRoutine() {
// Calls still running. Can't stop. // Calls still running. Can't stop.
select { select {
// XXX Reindent this (not now to make diff clear) // XXX Reindent this (not now to make diff clear)
case c = <-tracker._expectChan: case <-tracker._expectChan:
tracker._waiting += 1 tracker._waiting++
case c = <-tracker._doneChan: case c = <-tracker._doneChan:
tracker._waiting -= 1 tracker._waiting--
switch c.status() { switch c.status() {
case succeededSt: case succeededSt:
if c.kind == testKd { if c.kind == testKd {
@ -498,9 +498,9 @@ func (tracker *resultTracker) _loopRoutine() {
select { select {
case tracker._stopChan <- true: case tracker._stopChan <- true:
return return
case c = <-tracker._expectChan: case <-tracker._expectChan:
tracker._waiting += 1 tracker._waiting++
case c = <-tracker._doneChan: case <-tracker._doneChan:
panic("Tracker got an unexpected done call.") panic("Tracker got an unexpected done call.")
} }
} }
@ -522,6 +522,7 @@ type suiteRunner struct {
reportedProblemLast bool reportedProblemLast bool
benchTime time.Duration benchTime time.Duration
benchMem bool benchMem bool
abort bool
} }
type RunConf struct { type RunConf struct {
@ -533,6 +534,7 @@ type RunConf struct {
BenchmarkTime time.Duration // Defaults to 1 second BenchmarkTime time.Duration // Defaults to 1 second
BenchmarkMem bool BenchmarkMem bool
KeepWorkDir bool KeepWorkDir bool
Abort bool
} }
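Setting Abort in RunConf (or leaving the new -check.abort flag, further below, at its default) stops a run from executing further tests and suites after the first failure or panic, counting the rest as missed. A hedged sketch of driving it through RunAll, using the vendored import path; the suite and tests are hypothetical:

```go
package main

import (
	"fmt"
	"os"

	"github.com/go-check/check"
)

// DemoSuite is a hypothetical suite with a deliberately failing test.
type DemoSuite struct{}

func (s *DemoSuite) TestFails(c *check.C)   { c.Assert(1, check.Equals, 2) }
func (s *DemoSuite) TestSkipped(c *check.C) { c.Check(true, check.Equals, true) }

func main() {
	check.Suite(&DemoSuite{})
	// Abort: once TestFails fails, the remaining tests are skipped
	// and reported as missed rather than executed.
	res := check.RunAll(&check.RunConf{Output: os.Stdout, Abort: true})
	fmt.Println(res.String())
}
```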
// Create a new suiteRunner able to run all methods in the given suite. // Create a new suiteRunner able to run all methods in the given suite.
@ -561,6 +563,7 @@ func newSuiteRunner(suite interface{}, runConf *RunConf) *suiteRunner {
tempDir: &tempDir{}, tempDir: &tempDir{},
keepDir: conf.KeepWorkDir, keepDir: conf.KeepWorkDir,
tests: make([]*methodType, 0, suiteNumMethods), tests: make([]*methodType, 0, suiteNumMethods),
abort: conf.Abort,
} }
if runner.benchTime == 0 { if runner.benchTime == 0 {
runner.benchTime = 1 * time.Second runner.benchTime = 1 * time.Second
@ -568,13 +571,13 @@ func newSuiteRunner(suite interface{}, runConf *RunConf) *suiteRunner {
var filterRegexp *regexp.Regexp var filterRegexp *regexp.Regexp
if conf.Filter != "" { if conf.Filter != "" {
if regexp, err := regexp.Compile(conf.Filter); err != nil { regexp, err := regexp.Compile(conf.Filter)
if err != nil {
msg := "Bad filter expression: " + err.Error() msg := "Bad filter expression: " + err.Error()
runner.tracker.result.RunError = errors.New(msg) runner.tracker.result.RunError = errors.New(msg)
return runner return runner
} else {
filterRegexp = regexp
} }
filterRegexp = regexp
} }
for i := 0; i != suiteNumMethods; i++ { for i := 0; i != suiteNumMethods; i++ {
@ -613,7 +616,9 @@ func (runner *suiteRunner) run() *Result {
if c == nil || c.status() == succeededSt { if c == nil || c.status() == succeededSt {
for i := 0; i != len(runner.tests); i++ { for i := 0; i != len(runner.tests); i++ {
c := runner.runTest(runner.tests[i]) c := runner.runTest(runner.tests[i])
if c.status() == fixturePanickedSt { status := c.status()
if status == fixturePanickedSt || runner.abort &&
(status == failedSt || status == panickedSt) {
runner.skipTests(missedSt, runner.tests[i+1:]) runner.skipTests(missedSt, runner.tests[i+1:])
break break
} }
@ -637,6 +642,16 @@ func (runner *suiteRunner) run() *Result {
return &runner.tracker.result return &runner.tracker.result
} }
// Skip all methods in the given suite.
func (runner *suiteRunner) skip() *Result {
if runner.tracker.result.RunError == nil && len(runner.tests) > 0 {
runner.tracker.start()
runner.skipTests(missedSt, runner.tests)
runner.tracker.waitAndStop()
}
return &runner.tracker.result
}
// Create a call object with the given suite method, and fork a // Create a call object with the given suite method, and fork a
// goroutine with the provided dispatcher for running it. // goroutine with the provided dispatcher for running it.
func (runner *suiteRunner) forkCall(method *methodType, kind funcKind, testName string, logb *logger, dispatcher func(c *C)) *C { func (runner *suiteRunner) forkCall(method *methodType, kind funcKind, testName string, logb *logger, dispatcher func(c *C)) *C {
@ -871,84 +886,3 @@ func (runner *suiteRunner) reportCallDone(c *C) {
runner.output.WriteCallSuccess("MISS", c) runner.output.WriteCallSuccess("MISS", c)
} }
} }
// -----------------------------------------------------------------------
// Output writer manages atomic output writing according to settings.
type outputWriter struct {
m sync.Mutex
writer io.Writer
wroteCallProblemLast bool
Stream bool
Verbose bool
}
func newOutputWriter(writer io.Writer, stream, verbose bool) *outputWriter {
return &outputWriter{writer: writer, Stream: stream, Verbose: verbose}
}
func (ow *outputWriter) Write(content []byte) (n int, err error) {
ow.m.Lock()
n, err = ow.writer.Write(content)
ow.m.Unlock()
return
}
func (ow *outputWriter) WriteCallStarted(label string, c *C) {
if ow.Stream {
header := renderCallHeader(label, c, "", "\n")
ow.m.Lock()
ow.writer.Write([]byte(header))
ow.m.Unlock()
}
}
func (ow *outputWriter) WriteCallProblem(label string, c *C) {
var prefix string
if !ow.Stream {
prefix = "\n-----------------------------------" +
"-----------------------------------\n"
}
header := renderCallHeader(label, c, prefix, "\n\n")
ow.m.Lock()
ow.wroteCallProblemLast = true
ow.writer.Write([]byte(header))
if !ow.Stream {
c.logb.WriteTo(ow.writer)
}
ow.m.Unlock()
}
func (ow *outputWriter) WriteCallSuccess(label string, c *C) {
if ow.Stream || (ow.Verbose && c.kind == testKd) {
// TODO Use a buffer here.
var suffix string
if c.reason != "" {
suffix = " (" + c.reason + ")"
}
if c.status() == succeededSt {
suffix += "\t" + c.timerString()
}
suffix += "\n"
if ow.Stream {
suffix += "\n"
}
header := renderCallHeader(label, c, "", suffix)
ow.m.Lock()
// Resist temptation of using line as prefix above due to race.
if !ow.Stream && ow.wroteCallProblemLast {
header = "\n-----------------------------------" +
"-----------------------------------\n" +
header
}
ow.wroteCallProblemLast = false
ow.writer.Write([]byte(header))
ow.m.Unlock()
}
}
func renderCallHeader(label string, c *C, prefix, suffix string) string {
pc := c.method.PC()
return fmt.Sprintf("%s%s: %s: %s%s", prefix, label, niceFuncPath(pc),
niceFuncName(pc), suffix)
}

View file

@ -88,7 +88,7 @@ func (checker *notChecker) Info() *CheckerInfo {
} }
func (checker *notChecker) Check(params []interface{}, names []string) (result bool, error string) { func (checker *notChecker) Check(params []interface{}, names []string) (result bool, error string) {
result, error = checker.sub.Check(params, names) result, _ = checker.sub.Check(params, names)
result = !result result = !result
return return
} }
@ -212,7 +212,7 @@ type hasLenChecker struct {
// The HasLen checker verifies that the obtained value has the // The HasLen checker verifies that the obtained value has the
// provided length. In many cases this is superior to using Equals // provided length. In many cases this is superior to using Equals
// in conjuction with the len function because in case the check // in conjunction with the len function because in case the check
// fails the value itself will be printed, instead of its length, // fails the value itself will be printed, instead of its length,
// providing more details for figuring the problem. // providing more details for figuring the problem.
// //
@ -235,7 +235,10 @@ func (checker *hasLenChecker) Check(params []interface{}, names []string) (resul
default: default:
return false, "obtained value type has no length" return false, "obtained value type has no length"
} }
return value.Len() == n, "" if value.Len() == n {
return true, ""
}
return false, fmt.Sprintf("obtained length = %d", value.Len())
} }
// ----------------------------------------------------------------------- // -----------------------------------------------------------------------
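With this change a failed HasLen assertion reports the obtained length alongside the printed value, instead of a bare mismatch. A small hedged usage sketch; the suite and values are hypothetical:

```go
package example

import "github.com/go-check/check"

type LenSuite struct{}

var _ = check.Suite(&LenSuite{})

func (s *LenSuite) TestHasLen(c *check.C) {
	items := []string{"a", "b"}
	// Passes as written; if the expected length were 3, the failure
	// log would now include "obtained length = 2" next to the value.
	c.Check(items, check.HasLen, 2)
}
```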

View file

@ -212,7 +212,7 @@ func (c *C) internalCheck(funcName string, obtained interface{}, checker Checker
// Do the actual check. // Do the actual check.
result, error := checker.Check(params, names) result, error := checker.Check(params, names)
if !result || error != "" { if !result {
c.logCaller(2) c.logCaller(2)
for i := 0; i != len(params); i++ { for i := 0; i != len(params); i++ {
c.logValue(names[i], params[i]) c.logValue(names[i], params[i])

88 vendor/github.com/go-check/check/reporter.go generated vendored Normal file
View file

@ -0,0 +1,88 @@
package check
import (
"fmt"
"io"
"sync"
)
// -----------------------------------------------------------------------
// Output writer manages atomic output writing according to settings.
type outputWriter struct {
m sync.Mutex
writer io.Writer
wroteCallProblemLast bool
Stream bool
Verbose bool
}
func newOutputWriter(writer io.Writer, stream, verbose bool) *outputWriter {
return &outputWriter{writer: writer, Stream: stream, Verbose: verbose}
}
func (ow *outputWriter) Write(content []byte) (n int, err error) {
ow.m.Lock()
n, err = ow.writer.Write(content)
ow.m.Unlock()
return
}
func (ow *outputWriter) WriteCallStarted(label string, c *C) {
if ow.Stream {
header := renderCallHeader(label, c, "", "\n")
ow.m.Lock()
ow.writer.Write([]byte(header))
ow.m.Unlock()
}
}
func (ow *outputWriter) WriteCallProblem(label string, c *C) {
var prefix string
if !ow.Stream {
prefix = "\n-----------------------------------" +
"-----------------------------------\n"
}
header := renderCallHeader(label, c, prefix, "\n\n")
ow.m.Lock()
ow.wroteCallProblemLast = true
ow.writer.Write([]byte(header))
if !ow.Stream {
c.logb.WriteTo(ow.writer)
}
ow.m.Unlock()
}
func (ow *outputWriter) WriteCallSuccess(label string, c *C) {
if ow.Stream || (ow.Verbose && c.kind == testKd) {
// TODO Use a buffer here.
var suffix string
if c.reason != "" {
suffix = " (" + c.reason + ")"
}
if c.status() == succeededSt {
suffix += "\t" + c.timerString()
}
suffix += "\n"
if ow.Stream {
suffix += "\n"
}
header := renderCallHeader(label, c, "", suffix)
ow.m.Lock()
// Resist temptation of using line as prefix above due to race.
if !ow.Stream && ow.wroteCallProblemLast {
header = "\n-----------------------------------" +
"-----------------------------------\n" +
header
}
ow.wroteCallProblemLast = false
ow.writer.Write([]byte(header))
ow.m.Unlock()
}
}
func renderCallHeader(label string, c *C, prefix, suffix string) string {
pc := c.method.PC()
return fmt.Sprintf("%s%s: %s: %s%s", prefix, label, niceFuncPath(pc),
niceFuncName(pc), suffix)
}

View file

@ -42,6 +42,7 @@ var (
newBenchMem = flag.Bool("check.bmem", false, "Report memory benchmarks") newBenchMem = flag.Bool("check.bmem", false, "Report memory benchmarks")
newListFlag = flag.Bool("check.list", false, "List the names of all tests that will be run") newListFlag = flag.Bool("check.list", false, "List the names of all tests that will be run")
newWorkFlag = flag.Bool("check.work", false, "Display and do not remove the test working directory") newWorkFlag = flag.Bool("check.work", false, "Display and do not remove the test working directory")
abort = flag.Bool("check.abort", true, "Stop testing the suite if a test has failed")
) )
// TestingT runs all test suites registered with the Suite function, // TestingT runs all test suites registered with the Suite function,
@ -60,6 +61,7 @@ func TestingT(testingT *testing.T) {
BenchmarkTime: benchTime, BenchmarkTime: benchTime,
BenchmarkMem: *newBenchMem, BenchmarkMem: *newBenchMem,
KeepWorkDir: *oldWorkFlag || *newWorkFlag, KeepWorkDir: *oldWorkFlag || *newWorkFlag,
Abort: *abort,
} }
if *oldListFlag || *newListFlag { if *oldListFlag || *newListFlag {
w := bufio.NewWriter(os.Stdout) w := bufio.NewWriter(os.Stdout)
@ -80,8 +82,21 @@ func TestingT(testingT *testing.T) {
// provided run configuration. // provided run configuration.
func RunAll(runConf *RunConf) *Result { func RunAll(runConf *RunConf) *Result {
result := Result{} result := Result{}
skipTests := false
for _, suite := range allSuites { for _, suite := range allSuites {
result.Add(Run(suite, runConf)) var res *Result
if skipTests {
// Count missed tests.
res = skipSuite(suite, runConf)
} else {
res = Run(suite, runConf)
}
result.Add(res)
if runConf.Abort && (res.Failed > 0 || res.Panicked > 0) {
skipTests = true
continue
}
} }
return &result return &result
} }
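RunAll now routes the suites that follow a failed or panicked one through skipSuite, so their tests are tallied as missed instead of run, and the behaviour is governed by the new -check.abort flag (default true). A hedged sketch of the usual go test hookup; the package name is hypothetical:

```go
package mypkg_test

import (
	"testing"

	"github.com/go-check/check"
)

// Hook gocheck into `go test`. Running `go test -check.abort=false`
// restores the old behaviour of executing every suite even after a
// failure; the default now skips the remaining ones as missed.
func Test(t *testing.T) { check.TestingT(t) }
```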
@ -92,6 +107,11 @@ func Run(suite interface{}, runConf *RunConf) *Result {
return runner.run() return runner.run()
} }
func skipSuite(suite interface{}, runConf *RunConf) *Result {
runner := newSuiteRunner(suite, runConf)
return runner.skip()
}
// ListAll returns the names of all the test functions registered with the // ListAll returns the names of all the test functions registered with the
// Suite function that will be run with the provided run configuration. // Suite function that will be run with the provided run configuration.
func ListAll(runConf *RunConf) []string { func ListAll(runConf *RunConf) []string {