chore: update linter.

Ludovic Fernandez 2021-03-04 09:02:03 +01:00 committed by GitHub
parent ec0d03658d
commit 2e7833df49
21 changed files with 179 additions and 140 deletions


@@ -30,38 +30,64 @@
lines = 230 # default 60
statements = 120 # default 40
[linters-settings.forbidigo]
forbid = [
'^print(ln)?$',
'^spew\.Print(f|ln)?$',
'^spew\.Dump$',
]
[linters-settings.depguard]
list-type = "blacklist"
include-go-root = false
packages = ["github.com/pkg/errors"]
[linters-settings.godox]
keywords = ["FIXME"]
[linters-settings.importas]
corev1 = "k8s.io/api/core/v1"
networkingv1beta1 = "k8s.io/api/networking/v1beta1"
extensionsv1beta1 = "k8s.io/api/extensions/v1beta1"
metav1 = "k8s.io/apimachinery/pkg/apis/meta/v1"
kubeerror = "k8s.io/apimachinery/pkg/api/errors"
[linters]
enable-all = true
disable = [
"scopelint", # Deprecated
"interfacer", # Deprecated
"maligned", # Deprecated
"sqlclosecheck", # Not relevant (SQL)
"rowserrcheck", # Not relevant (SQL)
"lll", # Not relevant
"gocyclo", # FIXME must be fixed
"gosec",
"dupl",
"maligned",
"lll",
"unparam",
"prealloc",
"scopelint",
"cyclop", # Duplicate of gocyclo
"gocognit", # Too strict
"nestif", # Too many false-positive.
"prealloc", # Too many false-positive.
"makezero", # Not relevant
"ifshort", # Not relevant
"dupl", # Too strict
"gosec", # Too strict
"gochecknoinits",
"gochecknoglobals",
"godox",
"gocognit",
"bodyclose", # Too many false-positive and panics.
"wsl", # Too strict
"nlreturn", # Not relevant
"gomnd", # Too strict
"stylecheck", # skip because report issues related to some generated files.
"testpackage", # Too strict
"goerr113", # Too strict
"nestif", # Too many false-positive.
"noctx", # Too strict
"exhaustive", # Too strict
"nlreturn", # Not relevant
"wrapcheck", # Too strict
"tparallel", # Not relevant
"paralleltest", # Not relevant
"exhaustive", # Not relevant
"exhaustivestruct", # Not relevant
"makezero", # not relevant
"forbidigo", # not relevant
"ifshort", # not relevant
"goerr113", # Too strict
"wrapcheck", # Too strict
"noctx", # Too strict
"bodyclose", # Too many false-positive and panics.
"unparam", # Too strict
"godox", # Too strict
"forcetypeassert", # Too strict
]
[issues]
@@ -69,9 +95,9 @@
max-per-linter = 0
max-same-issues = 0
exclude = [
"SA1019: http.CloseNotifier is deprecated: the CloseNotifier interface predates Go's context package. New code should use Request.Context instead.", # FIXME must be fixed
"Error return value of .((os\\.)?std(out|err)\\..*|.*Close|.*Flush|os\\.Remove(All)?|.*printf?|os\\.(Un)?Setenv). is not checked",
"should have a package comment, unless it's in another file for this package",
"SA1019: http.CloseNotifier has been deprecated", # FIXME must be fixed
]
[[issues.exclude-rules]]
path = "(.+)_test.go"
@@ -88,18 +114,12 @@
[[issues.exclude-rules]]
path = "pkg/h2c/h2c.go"
text = "Error return value of `rw.Write` is not checked"
[[issues.exclude-rules]]
path = "pkg/middlewares/recovery/recovery.go"
text = "`logger` can be `github.com/stretchr/testify/assert.TestingT`"
[[issues.exclude-rules]]
path = "pkg/provider/docker/builder_test.go"
text = "(U1000: func )?`(.+)` is unused"
[[issues.exclude-rules]]
path = "pkg/provider/kubernetes/builder_(endpoint|service)_test.go"
text = "(U1000: func )?`(.+)` is unused"
[[issues.exclude-rules]]
path = "pkg/config/parser/.+_test.go"
text = "U1000: field `(foo|fuu)` is unused"
[[issues.exclude-rules]]
path = "pkg/server/service/bufferpool.go"
text = "SA6002: argument should be pointer-like to avoid allocations"
@@ -109,9 +129,6 @@
[[issues.exclude-rules]]
path = "pkg/server/middleware/middlewares.go"
text = "Function 'buildConstructor' has too many statements"
[[issues.exclude-rules]] # FIXME must be fixed
path = "cmd/context.go"
text = "S1000: should use a simple channel send/receive instead of `select` with a single case"
[[issues.exclude-rules]]
path = "pkg/tracing/haystack/logger.go"
linters = ["goprintffuncname"]


@@ -63,18 +63,18 @@ generate-webui: build-webui-image
mkdir -p static; \
docker run --rm -v "$$PWD/static":'/src/static' traefik-webui npm run build:nc; \
docker run --rm -v "$$PWD/static":'/src/static' traefik-webui chown -R $(shell id -u):$(shell id -g) ../static; \
echo 'For more informations show `webui/readme.md`' > $$PWD/static/DONT-EDIT-FILES-IN-THIS-DIRECTORY.md; \
echo 'For more information show `webui/readme.md`' > $$PWD/static/DONT-EDIT-FILES-IN-THIS-DIRECTORY.md; \
fi
## Build the linux binary
binary: generate-webui $(PRE_TARGET)
$(if $(PRE_TARGET),$(DOCKER_RUN_TRAEFIK)) ./script/make.sh generate binary
## Build the binary for the standard plaforms (linux, darwin, windows)
## Build the binary for the standard platforms (linux, darwin, windows)
crossbinary-default: generate-webui build-dev-image
$(DOCKER_RUN_TRAEFIK_NOTTY) ./script/make.sh generate crossbinary-default
## Build the binary for the standard plaforms (linux, darwin, windows) in parallel
## Build the binary for the standard platforms (linux, darwin, windows) in parallel
crossbinary-default-parallel:
$(MAKE) generate-webui
$(MAKE) build-dev-image crossbinary-default


@@ -19,7 +19,7 @@ RUN mkdir -p /usr/local/bin \
&& chmod +x /usr/local/bin/go-bindata
# Download golangci-lint binary to bin folder in $GOPATH
RUN curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | bash -s -- -b $GOPATH/bin v1.36.0
RUN curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | bash -s -- -b $GOPATH/bin v1.38.0
# Download misspell binary to bin folder in $GOPATH
RUN curl -sfL https://raw.githubusercontent.com/client9/misspell/master/install-misspell.sh | bash -s -- -b $GOPATH/bin v0.3.4


@@ -13,10 +13,8 @@ func ContextWithSignal(ctx context.Context) context.Context {
signals := make(chan os.Signal)
signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM)
go func() {
select {
case <-signals:
cancel()
}
<-signals
cancel()
}()
return newCtx
}
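
This is the simplification that lets the `S1000` exclude-rule for `cmd/context.go` be dropped from the linter config above. A self-contained sketch of the resulting helper, reconstructed from this hunk (the package name and the `context.WithCancel` wiring are assumptions based on the visible signature):

package cmd

import (
	"context"
	"os"
	"os/signal"
	"syscall"
)

// ContextWithSignal returns a context that is canceled on SIGINT or SIGTERM.
func ContextWithSignal(ctx context.Context) context.Context {
	newCtx, cancel := context.WithCancel(ctx)
	signals := make(chan os.Signal)
	signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM)
	go func() {
		// A plain receive replaces the former single-case select (staticcheck S1000).
		<-signals
		cancel()
	}()
	return newCtx
}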


@@ -366,30 +366,32 @@ func initACMEProvider(c *static.Configuration, providerAggregator *aggregator.Pr
var resolvers []*acme.Provider
for name, resolver := range c.CertificatesResolvers {
if resolver.ACME != nil {
if localStores[resolver.ACME.Storage] == nil {
localStores[resolver.ACME.Storage] = acme.NewLocalStore(resolver.ACME.Storage)
}
p := &acme.Provider{
Configuration: resolver.ACME,
Store: localStores[resolver.ACME.Storage],
ResolverName: name,
HTTPChallengeProvider: httpChallengeProvider,
TLSChallengeProvider: tlsChallengeProvider,
}
if err := providerAggregator.AddProvider(p); err != nil {
log.WithoutContext().Errorf("The ACME resolver %q is skipped from the resolvers list because: %v", name, err)
continue
}
p.SetTLSManager(tlsManager)
p.SetConfigListenerChan(make(chan dynamic.Configuration))
resolvers = append(resolvers, p)
if resolver.ACME == nil {
continue
}
if localStores[resolver.ACME.Storage] == nil {
localStores[resolver.ACME.Storage] = acme.NewLocalStore(resolver.ACME.Storage)
}
p := &acme.Provider{
Configuration: resolver.ACME,
Store: localStores[resolver.ACME.Storage],
ResolverName: name,
HTTPChallengeProvider: httpChallengeProvider,
TLSChallengeProvider: tlsChallengeProvider,
}
if err := providerAggregator.AddProvider(p); err != nil {
log.WithoutContext().Errorf("The ACME resolver %q is skipped from the resolvers list because: %v", name, err)
continue
}
p.SetTLSManager(tlsManager)
p.SetConfigListenerChan(make(chan dynamic.Configuration))
resolvers = append(resolvers, p)
}
return resolvers


@@ -112,7 +112,7 @@ func callHelloClientGRPC(name string, secure bool) (string, error) {
} else {
client, closer, err = getHelloClientGRPCh2c()
}
defer closer()
defer func() { _ = closer() }()
if err != nil {
return "", err
@@ -139,6 +139,7 @@ func callStreamExampleClientGRPC() (helloworld.Greeter_StreamExampleClient, func
func (s *GRPCSuite) TestGRPC(c *check.C) {
lis, err := net.Listen("tcp", ":0")
c.Assert(err, check.IsNil)
_, port, err := net.SplitHostPort(lis.Addr().String())
c.Assert(err, check.IsNil)
@@ -181,6 +182,7 @@ func (s *GRPCSuite) TestGRPC(c *check.C) {
func (s *GRPCSuite) TestGRPCh2c(c *check.C) {
lis, err := net.Listen("tcp", ":0")
c.Assert(err, check.IsNil)
_, port, err := net.SplitHostPort(lis.Addr().String())
c.Assert(err, check.IsNil)
@@ -219,6 +221,7 @@ func (s *GRPCSuite) TestGRPCh2c(c *check.C) {
func (s *GRPCSuite) TestGRPCh2cTermination(c *check.C) {
lis, err := net.Listen("tcp", ":0")
c.Assert(err, check.IsNil)
_, port, err := net.SplitHostPort(lis.Addr().String())
c.Assert(err, check.IsNil)
@@ -261,6 +264,7 @@ func (s *GRPCSuite) TestGRPCh2cTermination(c *check.C) {
func (s *GRPCSuite) TestGRPCInsecure(c *check.C) {
lis, err := net.Listen("tcp", ":0")
c.Assert(err, check.IsNil)
_, port, err := net.SplitHostPort(lis.Addr().String())
c.Assert(err, check.IsNil)
@@ -340,7 +344,7 @@ func (s *GRPCSuite) TestGRPCBuffer(c *check.C) {
c.Assert(err, check.IsNil)
var client helloworld.Greeter_StreamExampleClient
client, closer, err := callStreamExampleClientGRPC()
defer closer()
defer func() { _ = closer() }()
c.Assert(err, check.IsNil)
received := make(chan bool)
@@ -400,8 +404,10 @@ func (s *GRPCSuite) TestGRPCBufferWithFlushInterval(c *check.C) {
var client helloworld.Greeter_StreamExampleClient
client, closer, err := callStreamExampleClientGRPC()
defer closer()
defer func() { stopStreamExample <- true }()
defer func() {
_ = closer()
stopStreamExample <- true
}()
c.Assert(err, check.IsNil)
received := make(chan bool)
@@ -425,6 +431,7 @@ func (s *GRPCSuite) TestGRPCBufferWithFlushInterval(c *check.C) {
func (s *GRPCSuite) TestGRPCWithRetry(c *check.C) {
lis, err := net.Listen("tcp", ":0")
c.Assert(err, check.IsNil)
_, port, err := net.SplitHostPort(lis.Addr().String())
c.Assert(err, check.IsNil)


@@ -203,10 +203,10 @@ func (s *RestSuite) TestSimpleConfiguration(c *check.C) {
}
for _, test := range testCase {
json, err := json.Marshal(test.config)
data, err := json.Marshal(test.config)
c.Assert(err, checker.IsNil)
request, err := http.NewRequest(http.MethodPut, "http://127.0.0.1:8000/secure/api/providers/rest", bytes.NewReader(json))
request, err := http.NewRequest(http.MethodPut, "http://127.0.0.1:8000/secure/api/providers/rest", bytes.NewReader(data))
c.Assert(err, checker.IsNil)
response, err := http.DefaultClient.Do(request)
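
Renaming the variable from `json` to `data` stops it from shadowing the `encoding/json` package inside the loop body. A hypothetical example of the shadowing the rename avoids:

package example

import (
	"encoding/json"
	"fmt"
)

type config struct {
	Name string `json:"name"`
}

func encodeTwice(c config) error {
	// The variable name shadows the encoding/json package: from here on,
	// json.Marshal can no longer be referenced inside this function.
	json, err := json.Marshal(c)
	if err != nil {
		return err
	}
	fmt.Println(string(json))

	// json.Marshal(c) // would not compile here: json is now a []byte
	return nil
}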


@@ -120,7 +120,7 @@ func Do(timeout time.Duration, operation DoCondition) error {
fmt.Print("*")
if err = operation(); err == nil {
fmt.Println("+")
return err
return nil
}
}
}


@@ -461,8 +461,7 @@ func (s *WebsocketSuite) TestSSLhttp2(c *check.C) {
}))
ts.TLS = &tls.Config{}
ts.TLS.NextProtos = append(ts.TLS.NextProtos, `h2`)
ts.TLS.NextProtos = append(ts.TLS.NextProtos, `http/1.1`)
ts.TLS.NextProtos = append(ts.TLS.NextProtos, `h2`, `http/1.1`)
ts.StartTLS()
file := s.adaptFile(c, "fixtures/websocket/config_https.toml", struct {


@@ -40,7 +40,7 @@ func Do(baseConfig interface{}, indent bool) (string, error) {
}
func doOnJSON(input string) string {
mailExp := regexp.MustCompile(`\w[-._\w]*\w@\w[-._\w]*\w\.\w{2,3}"`)
mailExp := regexp.MustCompile(`\w[-.\w]*\w@\w[-.\w]*\w\.\w{2,3}"`)
return xurls.Relaxed().ReplaceAllString(mailExp.ReplaceAllString(input, maskLarge+"\""), maskLarge)
}
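
The only change to `mailExp` is dropping the underscores from the character classes: `\w` already matches `[0-9A-Za-z_]`, so the simplified pattern accepts exactly the same strings. A quick hypothetical check of the equivalence:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	oldExp := regexp.MustCompile(`\w[-._\w]*\w@\w[-._\w]*\w\.\w{2,3}"`)
	newExp := regexp.MustCompile(`\w[-.\w]*\w@\w[-.\w]*\w\.\w{2,3}"`)

	// A JSON-ish fragment like the ones doOnJSON masks; the trailing quote is
	// part of the pattern on purpose.
	sample := `"mail": "first_last@example.com"`

	fmt.Println(oldExp.MatchString(sample), newExp.MatchString(sample)) // true true
}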


@@ -79,7 +79,7 @@ func loadConfigFiles(configFile string, element interface{}) (string, error) {
return "", nil
}
if err = file.Decode(filePath, element); err != nil {
if err := file.Decode(filePath, element); err != nil {
return "", err
}
return filePath, nil


@@ -24,9 +24,21 @@ func TestJobBackOff(t *testing.T) {
exp.MinJobInterval = testMinJobInterval
exp.Reset()
expectedResults := []time.Duration{500, 500, 500, 1000, 2000, 4000, 5000, 5000, 500, 1000, 2000, 4000, 5000, 5000}
for i, d := range expectedResults {
expectedResults[i] = d * time.Millisecond
expectedResults := []time.Duration{
500 * time.Millisecond,
500 * time.Millisecond,
500 * time.Millisecond,
1 * time.Second,
2 * time.Second,
4 * time.Second,
5 * time.Second,
5 * time.Second,
500 * time.Millisecond,
1 * time.Second,
2 * time.Second,
4 * time.Second,
5 * time.Second,
5 * time.Second,
}
for i, expected := range expectedResults {
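
The rewritten table states each expected value as an explicit duration instead of multiplying `time.Duration` values in a loop, presumably to satisfy durationcheck-style linters that flag duration-times-duration arithmetic. A hypothetical minimal case:

package example

import "time"

// badBackoff multiplies two time.Duration values: d is already a duration
// (500ns), and although d * time.Millisecond happens to equal 500ms, the
// duration-times-duration pattern is what gets flagged.
func badBackoff() time.Duration {
	d := time.Duration(500)
	return d * time.Millisecond
}

// goodBackoff states the unit once, explicitly.
func goodBackoff() time.Duration {
	return 500 * time.Millisecond
}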


@@ -390,7 +390,7 @@ func newCollector(metricName string, labels stdprometheus.Labels, c stdprometheu
// collector wraps a Collector object from the Prometheus client library.
// It adds information on how many generations this metric should be present
// in the /metrics output, relatived to the time it was last tracked.
// in the /metrics output, relative to the time it was last tracked.
type collector struct {
id string
labels stdprometheus.Labels


@@ -711,10 +711,10 @@ func assertValidLogData(t *testing.T, expected string, logData []byte) {
assert.Equal(t, resultExpected[OriginContentSize], result[OriginContentSize], formatErrMessage)
assert.Equal(t, resultExpected[RequestRefererHeader], result[RequestRefererHeader], formatErrMessage)
assert.Equal(t, resultExpected[RequestUserAgentHeader], result[RequestUserAgentHeader], formatErrMessage)
assert.Regexp(t, regexp.MustCompile("[0-9]*"), result[RequestCount], formatErrMessage)
assert.Regexp(t, regexp.MustCompile(`\d*`), result[RequestCount], formatErrMessage)
assert.Equal(t, resultExpected[RouterName], result[RouterName], formatErrMessage)
assert.Equal(t, resultExpected[ServiceURL], result[ServiceURL], formatErrMessage)
assert.Regexp(t, regexp.MustCompile("[0-9]*ms"), result[Duration], formatErrMessage)
assert.Regexp(t, regexp.MustCompile(`\d*ms`), result[Duration], formatErrMessage)
}
func captureStdout(t *testing.T) (out *os.File, restoreStdout func()) {


@@ -92,39 +92,42 @@ func (c *customErrors) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
// check the recorder code against the configured http status code ranges
code := catcher.getCode()
for _, block := range c.httpCodeRanges {
if code >= block[0] && code <= block[1] {
logger.Debugf("Caught HTTP Status Code %d, returning error page", code)
if code < block[0] || code > block[1] {
continue
}
var query string
if len(c.backendQuery) > 0 {
query = "/" + strings.TrimPrefix(c.backendQuery, "/")
query = strings.ReplaceAll(query, "{status}", strconv.Itoa(code))
}
logger.Debugf("Caught HTTP Status Code %d, returning error page", code)
pageReq, err := newRequest(backendURL + query)
if err != nil {
logger.Error(err)
rw.WriteHeader(code)
_, err = fmt.Fprint(rw, http.StatusText(code))
if err != nil {
http.Error(rw, err.Error(), http.StatusInternalServerError)
}
return
}
var query string
if len(c.backendQuery) > 0 {
query = "/" + strings.TrimPrefix(c.backendQuery, "/")
query = strings.ReplaceAll(query, "{status}", strconv.Itoa(code))
}
recorderErrorPage := newResponseRecorder(ctx, rw)
utils.CopyHeaders(pageReq.Header, req.Header)
c.backendHandler.ServeHTTP(recorderErrorPage, pageReq.WithContext(req.Context()))
utils.CopyHeaders(rw.Header(), recorderErrorPage.Header())
pageReq, err := newRequest(backendURL + query)
if err != nil {
logger.Error(err)
rw.WriteHeader(code)
if _, err = rw.Write(recorderErrorPage.GetBody().Bytes()); err != nil {
logger.Error(err)
_, err = fmt.Fprint(rw, http.StatusText(code))
if err != nil {
http.Error(rw, err.Error(), http.StatusInternalServerError)
}
return
}
recorderErrorPage := newResponseRecorder(ctx, rw)
utils.CopyHeaders(pageReq.Header, req.Header)
c.backendHandler.ServeHTTP(recorderErrorPage, pageReq.WithContext(req.Context()))
utils.CopyHeaders(rw.Header(), recorderErrorPage.Header())
rw.WriteHeader(code)
if _, err = rw.Write(recorderErrorPage.GetBody().Bytes()); err != nil {
logger.Error(err)
}
return
}
}


@@ -27,12 +27,12 @@ func (n MockTracer) Extract(format, carrier interface{}) (opentracing.SpanContex
return nil, opentracing.ErrSpanContextNotFound
}
// MockSpanContext.
// MockSpanContext a span context mock.
type MockSpanContext struct{}
func (n MockSpanContext) ForeachBaggageItem(handler func(k, v string) bool) {}
// MockSpan.
// MockSpan a span mock.
type MockSpan struct {
OpName string
Tags map[string]interface{}


@@ -541,23 +541,24 @@ func (p *Provider) makeGatewayStatus(listenerStatuses []v1alpha1.ListenerStatus)
gatewayStatus.Listeners = listenerStatuses
// update "Scheduled" status with "ResourcesAvailable" reason
gatewayStatus.Conditions = append(gatewayStatus.Conditions, metav1.Condition{
Type: string(v1alpha1.GatewayConditionScheduled),
Status: metav1.ConditionTrue,
Reason: "ResourcesAvailable",
Message: "Resources available",
LastTransitionTime: metav1.Now(),
})
// update "Ready" status with "ListenersValid" reason
gatewayStatus.Conditions = append(gatewayStatus.Conditions, metav1.Condition{
Type: string(v1alpha1.GatewayConditionReady),
Status: metav1.ConditionTrue,
Reason: "ListenersValid",
Message: "Listeners valid",
LastTransitionTime: metav1.Now(),
})
gatewayStatus.Conditions = append(gatewayStatus.Conditions,
// update "Scheduled" status with "ResourcesAvailable" reason
metav1.Condition{
Type: string(v1alpha1.GatewayConditionScheduled),
Status: metav1.ConditionTrue,
Reason: "ResourcesAvailable",
Message: "Resources available",
LastTransitionTime: metav1.Now(),
},
// update "Ready" status with "ListenersValid" reason
metav1.Condition{
Type: string(v1alpha1.GatewayConditionReady),
Status: metav1.ConditionTrue,
Reason: "ListenersValid",
Message: "Listeners valid",
LastTransitionTime: metav1.Now(),
},
)
return gatewayStatus, nil
}


@@ -341,7 +341,7 @@ func (c *clientWrapper) updateIngressStatusOld(src *networkingv1beta1.Ingress, i
}
// isLoadBalancerIngressEquals returns true if the given slices are equal, false otherwise.
func isLoadBalancerIngressEquals(aSlice []corev1.LoadBalancerIngress, bSlice []corev1.LoadBalancerIngress) bool {
func isLoadBalancerIngressEquals(aSlice, bSlice []corev1.LoadBalancerIngress) bool {
if len(aSlice) != len(bSlice) {
return false
}


@@ -341,11 +341,11 @@ func (ln tcpKeepAliveListener) Accept() (net.Conn, error) {
return nil, err
}
if err = tc.SetKeepAlive(true); err != nil {
if err := tc.SetKeepAlive(true); err != nil {
return nil, err
}
if err = tc.SetKeepAlivePeriod(3 * time.Minute); err != nil {
if err := tc.SetKeepAlivePeriod(3 * time.Minute); err != nil {
// Some systems, such as OpenBSD, have no user-settable per-socket TCP
// keepalive options.
if !errors.Is(err, syscall.ENOPROTOOPT) {


@@ -147,8 +147,8 @@ func (b blackHoleResponseWriter) Header() http.Header {
return http.Header{}
}
func (b blackHoleResponseWriter) Write(bytes []byte) (int, error) {
return len(bytes), nil
func (b blackHoleResponseWriter) Write(data []byte) (int, error) {
return len(data), nil
}
func (b blackHoleResponseWriter) WriteHeader(statusCode int) {}


@@ -24,6 +24,19 @@ type stickyCookie struct {
httpOnly bool
}
// Balancer is a WeightedRoundRobin load balancer based on Earliest Deadline First (EDF).
// (https://en.wikipedia.org/wiki/Earliest_deadline_first_scheduling)
// Each pick from the schedule has the earliest deadline entry selected.
// Entries have deadlines set at currentDeadline + 1 / weight,
// providing weighted round robin behavior with floating point weights and an O(log n) pick time.
type Balancer struct {
stickyCookie *stickyCookie
mutex sync.RWMutex
handlers []*namedHandler
curDeadline float64
}
// New creates a new load balancer.
func New(sticky *dynamic.Sticky) *Balancer {
balancer := &Balancer{}
@@ -68,19 +81,6 @@ func (b *Balancer) Pop() interface{} {
return h
}
// Balancer is a WeightedRoundRobin load balancer based on Earliest Deadline First (EDF).
// (https://en.wikipedia.org/wiki/Earliest_deadline_first_scheduling)
// Each pick from the schedule has the earliest deadline entry selected.
// Entries have deadlines set at currentDeadline + 1 / weight,
// providing weighted round robin behavior with floating point weights and an O(log n) pick time.
type Balancer struct {
stickyCookie *stickyCookie
mutex sync.RWMutex
handlers []*namedHandler
curDeadline float64
}
func (b *Balancer) nextServer() (*namedHandler, error) {
b.mutex.Lock()
defer b.mutex.Unlock()
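
The relocated doc comment describes the scheduling scheme: handlers sit in a min-heap keyed by deadline, each pick takes the earliest deadline, and the picked handler is rescheduled at currentDeadline + 1/weight. A self-contained illustrative sketch of that idea (names and types here are not Traefik's):

package main

import (
	"container/heap"
	"fmt"
)

type entry struct {
	name     string
	weight   float64
	deadline float64
}

// edf is a min-heap of entries ordered by deadline.
type edf []*entry

func (s edf) Len() int            { return len(s) }
func (s edf) Less(i, j int) bool  { return s[i].deadline < s[j].deadline }
func (s edf) Swap(i, j int)       { s[i], s[j] = s[j], s[i] }
func (s *edf) Push(x interface{}) { *s = append(*s, x.(*entry)) }
func (s *edf) Pop() interface{} {
	old := *s
	e := old[len(old)-1]
	*s = old[:len(old)-1]
	return e
}

// pick returns the entry with the earliest deadline and reschedules it at
// curDeadline + 1/weight, which yields weighted round robin over time.
func pick(s *edf, curDeadline *float64) *entry {
	e := heap.Pop(s).(*entry)
	*curDeadline = e.deadline
	e.deadline = *curDeadline + 1/e.weight
	heap.Push(s, e)
	return e
}

func main() {
	s := &edf{{name: "a", weight: 3}, {name: "b", weight: 1}}
	heap.Init(s)
	var cur float64
	for i := 0; i < 8; i++ {
		fmt.Print(pick(s, &cur).name, " ") // roughly three "a" picks per "b"
	}
	fmt.Println()
}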