Merge branch 'v1.4' into master

Fernandez Ludovic 2017-09-21 11:12:39 +02:00
commit d6ef8ec3d1
7 changed files with 68 additions and 48 deletions


@@ -114,9 +114,7 @@ fmt:
	gofmt -s -l -w $(SRCS)

pull-images:
-	for f in $(shell find ./integration/resources/compose/ -type f); do \
-		docker-compose -f $$f pull; \
-	done
+	cat ./integration/resources/compose/*.yml | grep -E '^\s+image:' | awk '{print $$2}' | sort | uniq | xargs -n 1 docker pull

help: ## this help
	@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {sub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)


@@ -165,26 +165,43 @@ func NewTraefikDefaultPointersConfiguration() *TraefikConfiguration {
		FilePath: "",
	}

+	// default HealthCheckConfig
+	healthCheck := configuration.HealthCheckConfig{
+		Interval: flaeg.Duration(configuration.DefaultHealthCheckInterval),
+	}
+
+	// default RespondingTimeouts
+	respondingTimeouts := configuration.RespondingTimeouts{
+		IdleTimeout: flaeg.Duration(configuration.DefaultIdleTimeout),
+	}
+
+	// default ForwardingTimeouts
+	forwardingTimeouts := configuration.ForwardingTimeouts{
+		DialTimeout: flaeg.Duration(configuration.DefaultDialTimeout),
+	}
+
	defaultConfiguration := configuration.GlobalConfiguration{
-		Docker:        &defaultDocker,
-		File:          &defaultFile,
-		Web:           &defaultWeb,
-		Marathon:      &defaultMarathon,
-		Consul:        &defaultConsul,
-		ConsulCatalog: &defaultConsulCatalog,
-		Etcd:          &defaultEtcd,
-		Zookeeper:     &defaultZookeeper,
-		Boltdb:        &defaultBoltDb,
-		Kubernetes:    &defaultKubernetes,
-		Mesos:         &defaultMesos,
-		ECS:           &defaultECS,
-		Rancher:       &defaultRancher,
-		Eureka:        &defaultEureka,
-		DynamoDB:      &defaultDynamoDB,
-		Retry:         &configuration.Retry{},
-		HealthCheck:   &configuration.HealthCheckConfig{},
-		TraefikLog:    &defaultTraefikLog,
-		AccessLog:     &defaultAccessLog,
+		Docker:             &defaultDocker,
+		File:               &defaultFile,
+		Web:                &defaultWeb,
+		Marathon:           &defaultMarathon,
+		Consul:             &defaultConsul,
+		ConsulCatalog:      &defaultConsulCatalog,
+		Etcd:               &defaultEtcd,
+		Zookeeper:          &defaultZookeeper,
+		Boltdb:             &defaultBoltDb,
+		Kubernetes:         &defaultKubernetes,
+		Mesos:              &defaultMesos,
+		ECS:                &defaultECS,
+		Rancher:            &defaultRancher,
+		Eureka:             &defaultEureka,
+		DynamoDB:           &defaultDynamoDB,
+		Retry:              &configuration.Retry{},
+		HealthCheck:        &healthCheck,
+		RespondingTimeouts: &respondingTimeouts,
+		ForwardingTimeouts: &forwardingTimeouts,
+		TraefikLog:         &defaultTraefikLog,
+		AccessLog:          &defaultAccessLog,
	}

	return &TraefikConfiguration{

@@ -209,12 +226,6 @@ func NewTraefikConfiguration() *TraefikConfiguration {
			HealthCheck: &configuration.HealthCheckConfig{
				Interval: flaeg.Duration(configuration.DefaultHealthCheckInterval),
			},
-			RespondingTimeouts: &configuration.RespondingTimeouts{
-				IdleTimeout: flaeg.Duration(configuration.DefaultIdleTimeout),
-			},
-			ForwardingTimeouts: &configuration.ForwardingTimeouts{
-				DialTimeout: flaeg.Duration(configuration.DefaultDialTimeout),
-			},
			CheckNewVersion: true,
		},
		ConfigFile: "",
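For context on the hunk above: the default-pointers configuration now references pre-populated timeout defaults by address instead of zero-value literals such as &configuration.HealthCheckConfig{}. A small, self-contained sketch of that difference, using simplified stand-in types and a stand-in constant rather than Traefik's real configuration package:

package main

import (
	"fmt"
	"time"
)

// Stand-in for the real configuration type.
type HealthCheckConfig struct {
	Interval time.Duration
}

// Stand-in default; the real value lives in the configuration package.
const defaultHealthCheckInterval = 30 * time.Second

func main() {
	// Old style: a zero-value literal, so Interval is 0 until something sets it.
	zeroValue := &HealthCheckConfig{}

	// New style: a named default populated once and wired in by address.
	healthCheck := HealthCheckConfig{Interval: defaultHealthCheckInterval}
	withDefault := &healthCheck

	fmt.Println(zeroValue.Interval)   // 0s
	fmt.Println(withDefault.Interval) // 30s
}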


@@ -261,6 +261,7 @@ func (s *ConsulCatalogSuite) TestRefreshConfigWithMultipleNodeWithoutHealthCheck
	err = s.registerAgentService("test", nginx.NetworkSettings.IPAddress, 80, []string{"name=nginx1"})
	c.Assert(err, checker.IsNil, check.Commentf("Error registering agent service"))
+	defer s.deregisterAgentService(nginx.NetworkSettings.IPAddress)

	req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil)
	c.Assert(err, checker.IsNil)


@@ -52,6 +52,13 @@ func (retry *Retry) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
		recorder.responseWriter = rw
		retry.next.ServeHTTP(recorder, r.WithContext(newCtx))

+		// It's a stream request and the body gets already sent to the client.
+		// Therefore we should not send the response a second time.
+		if recorder.streamingResponseStarted {
+			break
+		}
+
		if !netErrorOccurred || attempts >= retry.attempts {
			utils.CopyHeaders(rw.Header(), recorder.Header())
			rw.WriteHeader(recorder.Code)

@@ -114,8 +121,9 @@ type retryResponseRecorder struct {
	HeaderMap http.Header   // the HTTP response headers
	Body      *bytes.Buffer // if non-nil, the bytes.Buffer to append written data to
-	responseWriter http.ResponseWriter
-	err            error
+	responseWriter           http.ResponseWriter
+	err                      error
+	streamingResponseStarted bool
}

// newRetryResponseRecorder returns an initialized retryResponseRecorder.

@@ -164,6 +172,12 @@ func (rw *retryResponseRecorder) CloseNotify() <-chan bool {
// Flush sends any buffered data to the client.
func (rw *retryResponseRecorder) Flush() {
+	if !rw.streamingResponseStarted {
+		utils.CopyHeaders(rw.responseWriter.Header(), rw.Header())
+		rw.responseWriter.WriteHeader(rw.Code)
+		rw.streamingResponseStarted = true
+	}
+
	_, err := rw.responseWriter.Write(rw.Body.Bytes())
	if err != nil {
		log.Errorf("Error writing response in retryResponseRecorder: %s", err)
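The streamingResponseStarted flag exists because once Flush has pushed headers and body to the real client, that attempt can no longer be replayed, so the retry loop has to stop even if a network error shows up later. A stripped-down, self-contained sketch of the pattern (simplified types and names, not Traefik's actual middleware):

package main

import (
	"bytes"
	"fmt"
	"net/http"
	"net/http/httptest"
)

// recorder buffers one response attempt; Flush forwards it to the real
// writer and marks the attempt as no longer retryable.
type recorder struct {
	header    http.Header
	body      bytes.Buffer
	code      int
	rw        http.ResponseWriter
	streaming bool // true once bytes have actually reached the client
}

func (r *recorder) Header() http.Header         { return r.header }
func (r *recorder) WriteHeader(code int)        { r.code = code }
func (r *recorder) Write(p []byte) (int, error) { return r.body.Write(p) }

func (r *recorder) Flush() {
	if !r.streaming {
		// Forward headers and status only once, then remember that the
		// response has started streaming.
		for k, v := range r.header {
			r.rw.Header()[k] = v
		}
		r.rw.WriteHeader(r.code)
		r.streaming = true
	}
	r.rw.Write(r.body.Bytes())
	r.body.Reset()
}

func main() {
	client := httptest.NewRecorder()

	for attempt := 1; attempt <= 3; attempt++ {
		rec := &recorder{header: http.Header{}, code: http.StatusOK, rw: client}

		// A streaming handler flushes as it goes.
		rec.Write([]byte("chunk"))
		rec.Flush()

		if rec.streaming {
			// The client already received data; replaying is impossible.
			fmt.Println("stop retrying after attempt", attempt)
			break
		}
	}
	fmt.Println("client got:", client.Body.String())
}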


@@ -218,7 +218,7 @@ func (p *CatalogProvider) watchCatalogServices(stopCh <-chan struct{}, watchCh c
		if data != nil {
			for key, value := range data {
-				nodes, _, err := catalog.Service(key, "", options)
+				nodes, _, err := catalog.Service(key, "", &api.QueryOptions{})
				if err != nil {
					log.Errorf("Failed to get detail of service %s: %s", key, err)
					return

@@ -243,16 +243,13 @@ func (p *CatalogProvider) watchCatalogServices(stopCh <-chan struct{}, watchCh c
			addedServiceNodeKeys, removedServiceNodeKeys := getChangedServiceNodeKeys(current, flashback)

-			if len(addedServiceKeys) > 0 || len(addedServiceNodeKeys) > 0 {
-				log.WithField("DiscoveredServices", addedServiceKeys).Debug("Catalog Services change detected.")
+			if len(removedServiceKeys) > 0 || len(removedServiceNodeKeys) > 0 || len(addedServiceKeys) > 0 || len(addedServiceNodeKeys) > 0 {
+				log.WithField("MissingServices", removedServiceKeys).WithField("DiscoveredServices", addedServiceKeys).Debug("Catalog Services change detected.")
				watchCh <- data
-				flashback = current
-			}
-
-			if len(removedServiceKeys) > 0 || len(removedServiceNodeKeys) > 0 {
-				log.WithField("MissingServices", removedServiceKeys).Debug("Catalog Services change detected.")
-				watchCh <- data
-				flashback = current
+
+				flashback = make(map[string]Service, len(current))
+				for key, value := range current {
+					flashback[key] = value
+				}
			}
		}
	}
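The element-wise copy gives flashback its own snapshot of the catalog state: a plain assignment only copies the map header, so later mutations of current would also be visible through flashback. A minimal illustration of that Go behavior (a generic example, not the provider's types):

package main

import "fmt"

func main() {
	current := map[string]string{"svc": "node1"}

	// Assignment aliases the same underlying map.
	aliased := current

	// An element-wise copy into a fresh map is an independent snapshot.
	snapshot := make(map[string]string, len(current))
	for key, value := range current {
		snapshot[key] = value
	}

	current["svc"] = "node2"

	fmt.Println(aliased["svc"])  // "node2": the alias sees the mutation
	fmt.Println(snapshot["svc"]) // "node1": the snapshot is unaffected
}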


@@ -128,7 +128,7 @@ func NewServer(globalConfiguration configuration.GlobalConfiguration) *Server {
// behaviour and backwards compatibility issues.
func createHTTPTransport(globalConfiguration configuration.GlobalConfiguration) *http.Transport {
	dialer := &net.Dialer{
-		Timeout:   30 * time.Second,
+		Timeout:   configuration.DefaultDialTimeout,
		KeepAlive: 30 * time.Second,
		DualStack: true,
	}

@@ -676,14 +676,13 @@ func buildServerTimeouts(globalConfig configuration.GlobalConfiguration) (readTi
		writeTimeout = time.Duration(globalConfig.RespondingTimeouts.WriteTimeout)
	}

-	// When RespondingTimeouts.IdleTimout is configured, always use that setting
-	if globalConfig.RespondingTimeouts != nil {
-		idleTimeout = time.Duration(globalConfig.RespondingTimeouts.IdleTimeout)
-	} else if globalConfig.IdleTimeout != 0 {
-		// Backwards compatibility for deprecated IdleTimeout
+	// Prefer legacy idle timeout parameter for backwards compatibility reasons
+	if globalConfig.IdleTimeout > 0 {
		idleTimeout = time.Duration(globalConfig.IdleTimeout)
		log.Warn("top-level idle timeout configuration has been deprecated -- please use responding timeouts")
+	} else if globalConfig.RespondingTimeouts != nil {
+		idleTimeout = time.Duration(globalConfig.RespondingTimeouts.IdleTimeout)
	} else {
		// Default value if neither the deprecated IdleTimeout nor the new RespondingTimeouts.IdleTimout are configured
		idleTimeout = time.Duration(configuration.DefaultIdleTimeout)
	}
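The reordered branches change the precedence for the idle timeout: a legacy top-level IdleTimeout now wins over RespondingTimeouts.IdleTimeout, and the package default is used only when neither is set. A rough sketch of just that selection logic, with simplified stand-in types and a stand-in default value (the real function also derives read and write timeouts):

package main

import (
	"fmt"
	"time"
)

// Simplified stand-ins for the configuration types.
type RespondingTimeouts struct {
	IdleTimeout time.Duration
}

type GlobalConfiguration struct {
	IdleTimeout        time.Duration // deprecated top-level setting
	RespondingTimeouts *RespondingTimeouts
}

const defaultIdleTimeout = 180 * time.Second // stand-in for configuration.DefaultIdleTimeout

// idleTimeout mirrors the precedence used after this change:
// legacy setting first, then RespondingTimeouts, then the default.
func idleTimeout(cfg GlobalConfiguration) time.Duration {
	if cfg.IdleTimeout > 0 {
		return cfg.IdleTimeout
	}
	if cfg.RespondingTimeouts != nil {
		return cfg.RespondingTimeouts.IdleTimeout
	}
	return defaultIdleTimeout
}

func main() {
	both := GlobalConfiguration{
		IdleTimeout:        45 * time.Second,
		RespondingTimeouts: &RespondingTimeouts{IdleTimeout: 80 * time.Second},
	}
	fmt.Println(idleTimeout(both)) // 45s: the deprecated setting takes precedence

	onlyNew := GlobalConfiguration{RespondingTimeouts: &RespondingTimeouts{IdleTimeout: 80 * time.Second}}
	fmt.Println(idleTimeout(onlyNew)) // 1m20s

	fmt.Println(idleTimeout(GlobalConfiguration{})) // 3m0s: package default
}

The test hunk below reflects the same precedence: with both values configured, the expected idle timeout drops from the 80-second RespondingTimeouts value to 45 seconds, which matches the legacy value that test case evidently sets.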


@@ -83,7 +83,7 @@ func TestPrepareServerTimeouts(t *testing.T) {
				IdleTimeout: flaeg.Duration(80 * time.Second),
			},
		},
-		wantIdleTimeout:  time.Duration(80 * time.Second),
+		wantIdleTimeout:  time.Duration(45 * time.Second),
		wantReadTimeout:  time.Duration(0 * time.Second),
		wantWriteTimeout: time.Duration(0 * time.Second),