diff --git a/Gopkg.lock b/Gopkg.lock index 17f8591c3..2c2bb82e4 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -598,7 +598,7 @@ revision = "73d445a93680fa1a78ae23a5839bad48f32ba1ee" [[projects]] - digest = "1:a04af13190b67ff69cf8fcd79ee133a24c4a7a900cacbc296261dd43f3fbde5c" + digest = "1:68a7713d996a30a8394715220e779bbcbc880a18b1e0ab0e12fd1fbbf9c711e6" name = "github.com/go-acme/lego" packages = [ "acme", @@ -683,8 +683,8 @@ "registration", ] pruneopts = "NUT" - revision = "aaecc1ca7254190b71c5f01f57ee3bb6701bc937" - version = "v2.4.0" + revision = "3d13faf68920543a393ad6cdfdea429627af2d34" + version = "v2.5.0" [[projects]] branch = "fork-containous" @@ -1416,17 +1416,18 @@ revision = "256dc444b735e061061cf46c809487313d5b0065" [[projects]] - branch = "master" - digest = "1:fff470b0a7bbf05cfe8bfc73bfdf4d21eb009ea84e601f3d27781474e5da960f" + digest = "1:253f275bd72c42f8d234712d1574c8b222fe9b72838bfaca11b21ace9c0e3d0a" name = "github.com/sacloud/libsacloud" packages = [ ".", "api", "sacloud", "sacloud/ostype", + "utils/mutexkv", ] pruneopts = "NUT" - revision = "306ea89b6ef19334614f7b0fc5aa19595022bb8c" + revision = "41c392dee98a83260abbe0fcd5c13beb7c75d103" + version = "v1.21.1" [[projects]] digest = "1:6bc0652ea6e39e22ccd522458b8bdd8665bf23bdc5a20eec90056e4dc7e273ca" @@ -1626,6 +1627,42 @@ pruneopts = "NUT" revision = "0c8571ac0ce161a5feb57375a9cdf148c98c0f70" +[[projects]] + digest = "1:aafe0319af5410fb19a23a575ea6ee4b14253e122ef87f936bac65ea1e6b280c" + name = "go.opencensus.io" + packages = [ + ".", + "internal", + "internal/tagencoding", + "metric/metricdata", + "metric/metricproducer", + "plugin/ochttp", + "plugin/ochttp/propagation/b3", + "resource", + "stats", + "stats/internal", + "stats/view", + "tag", + "trace", + "trace/internal", + "trace/propagation", + "trace/tracestate", + ] + pruneopts = "NUT" + revision = "df6e2001952312404b06f5f6f03fcb4aec1648e5" + version = "v0.21.0" + +[[projects]] + branch = "master" + digest = 
"1:02fe59517e10f9b400b500af8ac228c74cecb0cba7a5f438d8283edb97e14270" + name = "go.uber.org/ratelimit" + packages = [ + ".", + "internal/clock", + ] + pruneopts = "NUT" + revision = "c15da02342779cb6dc027fc95ee2277787698f36" + [[projects]] branch = "master" digest = "1:30c1930f8c9fee79f3af60c8b7cd92edd12a4f22187f5527d53509b1a794f555" @@ -1744,16 +1781,21 @@ [[projects]] branch = "master" - digest = "1:da32ebe70dd3ec97d2df26281b08b18d05c2f12491ae79f389813f6c8d3006b3" + digest = "1:70c173b8ecc111dd01dc07f0ada72c076e4ed91618ee559312ef8adf154cc539" name = "google.golang.org/api" packages = [ "dns/v1", "gensupport", "googleapi", "googleapi/internal/uritemplates", + "googleapi/transport", + "internal", + "option", + "transport/http", + "transport/http/internal/propagation", ] pruneopts = "NUT" - revision = "de943baf05a022a8f921b544b7827bacaba1aed5" + revision = "067bed655e9cbc26f4dbac8f8897b30756d90990" [[projects]] digest = "1:7206d98ec77c90c72ec2c405181a1dcf86965803b6dbc4f98ceab7a5047c37a9" diff --git a/Gopkg.toml b/Gopkg.toml index b6a718318..6d385f95f 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -193,9 +193,8 @@ required = [ name = "github.com/vulcand/oxy" [[constraint]] -# branch = "master" name = "github.com/go-acme/lego" - version = "2.4.0" + version = "2.5.0" [[constraint]] name = "google.golang.org/grpc" diff --git a/vendor/github.com/go-acme/lego/acme/api/internal/secure/jws.go b/vendor/github.com/go-acme/lego/acme/api/internal/secure/jws.go index 72c5a4016..213aeda0a 100644 --- a/vendor/github.com/go-acme/lego/acme/api/internal/secure/jws.go +++ b/vendor/github.com/go-acme/lego/acme/api/internal/secure/jws.go @@ -6,7 +6,6 @@ import ( "crypto/elliptic" "crypto/rsa" "encoding/base64" - "errors" "fmt" "github.com/go-acme/lego/acme/api/internal/nonces" @@ -118,9 +117,6 @@ func (j *JWS) GetKeyAuthorization(token string) (string, error) { // Generate the Key Authorization for the challenge jwk := &jose.JSONWebKey{Key: publicKey} - if jwk == nil { - return "", 
errors.New("could not generate JWK from key") - } thumbBytes, err := jwk.Thumbprint(crypto.SHA256) if err != nil { diff --git a/vendor/github.com/go-acme/lego/acme/api/internal/sender/useragent.go b/vendor/github.com/go-acme/lego/acme/api/internal/sender/useragent.go index 822ea5b11..f01719547 100644 --- a/vendor/github.com/go-acme/lego/acme/api/internal/sender/useragent.go +++ b/vendor/github.com/go-acme/lego/acme/api/internal/sender/useragent.go @@ -5,7 +5,7 @@ package sender const ( // ourUserAgent is the User-Agent of this underlying library package. - ourUserAgent = "xenolf-acme/2.4.0" + ourUserAgent = "xenolf-acme/2.5.0" // ourUserAgentComment is part of the UA comment linked to the version status of this underlying library package. // values: detach|release diff --git a/vendor/github.com/go-acme/lego/certificate/certificates.go b/vendor/github.com/go-acme/lego/certificate/certificates.go index f578b5880..b0327d5c1 100644 --- a/vendor/github.com/go-acme/lego/certificate/certificates.go +++ b/vendor/github.com/go-acme/lego/certificate/certificates.go @@ -114,6 +114,7 @@ func (c *Certifier) Obtain(request ObtainRequest) (*Resource, error) { err = c.resolver.Solve(authz) if err != nil { // If any challenge fails, return. Do not generate partial SAN certificates. + c.deactivateAuthorizations(order) return nil, err } @@ -170,6 +171,7 @@ func (c *Certifier) ObtainForCSR(csr x509.CertificateRequest, bundle bool) (*Res err = c.resolver.Solve(authz) if err != nil { // If any challenge fails, return. Do not generate partial SAN certificates. 
+ c.deactivateAuthorizations(order) return nil, err } diff --git a/vendor/github.com/go-acme/lego/providers/dns/cloudns/cloudns.go b/vendor/github.com/go-acme/lego/providers/dns/cloudns/cloudns.go index 2d5c5afef..a33d3f099 100644 --- a/vendor/github.com/go-acme/lego/providers/dns/cloudns/cloudns.go +++ b/vendor/github.com/go-acme/lego/providers/dns/cloudns/cloudns.go @@ -27,7 +27,7 @@ func NewDefaultConfig() *Config { return &Config{ PropagationTimeout: env.GetOrDefaultSecond("CLOUDNS_PROPAGATION_TIMEOUT", 120*time.Second), PollingInterval: env.GetOrDefaultSecond("CLOUDNS_POLLING_INTERVAL", 4*time.Second), - TTL: env.GetOrDefaultInt("CLOUDNS_TTL", dns01.DefaultTTL), + TTL: env.GetOrDefaultInt("CLOUDNS_TTL", 60), HTTPClient: &http.Client{ Timeout: env.GetOrDefaultSecond("CLOUDNS_HTTP_TIMEOUT", 30*time.Second), }, @@ -64,7 +64,7 @@ func NewDNSProviderConfig(config *Config) (*DNSProvider, error) { client, err := internal.NewClient(config.AuthID, config.AuthPassword) if err != nil { - return nil, err + return nil, fmt.Errorf("ClouDNS: %v", err) } client.HTTPClient = config.HTTPClient @@ -78,10 +78,15 @@ func (d *DNSProvider) Present(domain, token, keyAuth string) error { zone, err := d.client.GetZone(fqdn) if err != nil { - return err + return fmt.Errorf("ClouDNS: %v", err) } - return d.client.AddTxtRecord(zone.Name, fqdn, value, d.config.TTL) + err = d.client.AddTxtRecord(zone.Name, fqdn, value, d.config.TTL) + if err != nil { + return fmt.Errorf("ClouDNS: %v", err) + } + + return nil } // CleanUp removes the TXT record matching the specified parameters. 
@@ -90,15 +95,23 @@ func (d *DNSProvider) CleanUp(domain, token, keyAuth string) error { zone, err := d.client.GetZone(fqdn) if err != nil { - return err + return fmt.Errorf("ClouDNS: %v", err) } record, err := d.client.FindTxtRecord(zone.Name, fqdn) if err != nil { - return err + return fmt.Errorf("ClouDNS: %v", err) } - return d.client.RemoveTxtRecord(record.ID, zone.Name) + if record == nil { + return nil + } + + err = d.client.RemoveTxtRecord(record.ID, zone.Name) + if err != nil { + return fmt.Errorf("ClouDNS: %v", err) + } + return nil } // Timeout returns the timeout and interval to use when checking for DNS propagation. diff --git a/vendor/github.com/go-acme/lego/providers/dns/cloudns/internal/client.go b/vendor/github.com/go-acme/lego/providers/dns/cloudns/internal/client.go index fc6e63315..92620e935 100644 --- a/vendor/github.com/go-acme/lego/providers/dns/cloudns/internal/client.go +++ b/vendor/github.com/go-acme/lego/providers/dns/cloudns/internal/client.go @@ -2,6 +2,7 @@ package internal import ( "encoding/json" + "errors" "fmt" "io/ioutil" "net/http" @@ -14,6 +15,11 @@ import ( const defaultBaseURL = "https://api.cloudns.net/dns/" +type apiResponse struct { + Status string `json:"status"` + StatusDescription string `json:"statusDescription"` +} + type Zone struct { Name string Type string @@ -37,11 +43,11 @@ type TXTRecords map[string]TXTRecord // NewClient creates a ClouDNS client func NewClient(authID string, authPassword string) (*Client, error) { if authID == "" { - return nil, fmt.Errorf("ClouDNS: credentials missing: authID") + return nil, fmt.Errorf("credentials missing: authID") } if authPassword == "" { - return nil, fmt.Errorf("ClouDNS: credentials missing: authPassword") + return nil, fmt.Errorf("credentials missing: authPassword") } baseURL, err := url.Parse(defaultBaseURL) @@ -90,7 +96,7 @@ func (c *Client) GetZone(authFQDN string) (*Zone, error) { if len(result) > 0 { if err = json.Unmarshal(result, &zone); err != nil { - return nil, 
fmt.Errorf("ClouDNS: zone unmarshaling error: %v", err) + return nil, fmt.Errorf("zone unmarshaling error: %v", err) } } @@ -98,7 +104,7 @@ func (c *Client) GetZone(authFQDN string) (*Zone, error) { return &zone, nil } - return nil, fmt.Errorf("ClouDNS: zone %s not found for authFQDN %s", authZoneName, authFQDN) + return nil, fmt.Errorf("zone %s not found for authFQDN %s", authZoneName, authFQDN) } // FindTxtRecord return the TXT record a zone ID and a FQDN @@ -119,9 +125,14 @@ func (c *Client) FindTxtRecord(zoneName, fqdn string) (*TXTRecord, error) { return nil, err } + // the API returns [] when there is no records. + if string(result) == "[]" { + return nil, nil + } + var records TXTRecords if err = json.Unmarshal(result, &records); err != nil { - return nil, fmt.Errorf("ClouDNS: TXT record unmarshaling error: %v", err) + return nil, fmt.Errorf("TXT record unmarshaling error: %v: %s", err, string(result)) } for _, record := range records { @@ -130,7 +141,7 @@ func (c *Client) FindTxtRecord(zoneName, fqdn string) (*TXTRecord, error) { } } - return nil, fmt.Errorf("ClouDNS: no existing record found for %q", fqdn) + return nil, nil } // AddTxtRecord add a TXT record @@ -144,12 +155,25 @@ func (c *Client) AddTxtRecord(zoneName string, fqdn, value string, ttl int) erro q.Add("domain-name", zoneName) q.Add("host", host) q.Add("record", value) - q.Add("ttl", strconv.Itoa(ttl)) + q.Add("ttl", strconv.Itoa(ttlRounder(ttl))) q.Add("record-type", "TXT") reqURL.RawQuery = q.Encode() - _, err := c.doRequest(http.MethodPost, &reqURL) - return err + raw, err := c.doRequest(http.MethodPost, &reqURL) + if err != nil { + return err + } + + resp := apiResponse{} + if err = json.Unmarshal(raw, &resp); err != nil { + return fmt.Errorf("apiResponse unmarshaling error: %v: %s", err, string(raw)) + } + + if resp.Status != "Success" { + return fmt.Errorf("fail to add TXT record: %s %s", resp.Status, resp.StatusDescription) + } + + return nil } // RemoveTxtRecord remove a TXT record @@ 
-162,8 +186,21 @@ func (c *Client) RemoveTxtRecord(recordID int, zoneName string) error { q.Add("record-id", strconv.Itoa(recordID)) reqURL.RawQuery = q.Encode() - _, err := c.doRequest(http.MethodPost, &reqURL) - return err + raw, err := c.doRequest(http.MethodPost, &reqURL) + if err != nil { + return err + } + + resp := apiResponse{} + if err = json.Unmarshal(raw, &resp); err != nil { + return fmt.Errorf("apiResponse unmarshaling error: %v: %s", err, string(raw)) + } + + if resp.Status != "Success" { + return fmt.Errorf("fail to remove TXT record: %s %s", resp.Status, resp.StatusDescription) + } + + return nil } func (c *Client) doRequest(method string, url *url.URL) (json.RawMessage, error) { @@ -174,18 +211,18 @@ func (c *Client) doRequest(method string, url *url.URL) (json.RawMessage, error) resp, err := c.HTTPClient.Do(req) if err != nil { - return nil, fmt.Errorf("ClouDNS: %v", err) + return nil, err } defer resp.Body.Close() content, err := ioutil.ReadAll(resp.Body) if err != nil { - return nil, fmt.Errorf("ClouDNS: %s", toUnreadableBodyMessage(req, content)) + return nil, errors.New(toUnreadableBodyMessage(req, content)) } if resp.StatusCode != 200 { - return nil, fmt.Errorf("ClouDNS: invalid code (%v), error: %s", resp.StatusCode, content) + return nil, fmt.Errorf("invalid code (%v), error: %s", resp.StatusCode, content) } return content, nil } @@ -198,7 +235,7 @@ func (c *Client) buildRequest(method string, url *url.URL) (*http.Request, error req, err := http.NewRequest(method, url.String(), nil) if err != nil { - return nil, fmt.Errorf("ClouDNS: invalid request: %v", err) + return nil, fmt.Errorf("invalid request: %v", err) } return req, nil @@ -207,3 +244,28 @@ func (c *Client) buildRequest(method string, url *url.URL) (*http.Request, error func toUnreadableBodyMessage(req *http.Request, rawBody []byte) string { return fmt.Sprintf("the request %s sent a response with a body which is an invalid format: %q", req.URL, string(rawBody)) } + +// 
https://www.cloudns.net/wiki/article/58/ +// Available TTL's: +// 60 = 1 minute +// 300 = 5 minutes +// 900 = 15 minutes +// 1800 = 30 minutes +// 3600 = 1 hour +// 21600 = 6 hours +// 43200 = 12 hours +// 86400 = 1 day +// 172800 = 2 days +// 259200 = 3 days +// 604800 = 1 week +// 1209600 = 2 weeks +// 2592000 = 1 month +func ttlRounder(ttl int) int { + for _, validTTL := range []int{60, 300, 900, 1800, 3600, 21600, 43200, 86400, 172800, 259200, 604800, 1209600} { + if ttl <= validTTL { + return validTTL + } + } + + return 2592000 +} diff --git a/vendor/github.com/go-acme/lego/providers/dns/gandiv5/client.go b/vendor/github.com/go-acme/lego/providers/dns/gandiv5/client.go index d583f1c14..5c2c2485e 100644 --- a/vendor/github.com/go-acme/lego/providers/dns/gandiv5/client.go +++ b/vendor/github.com/go-acme/lego/providers/dns/gandiv5/client.go @@ -47,13 +47,13 @@ func (d *DNSProvider) addTXTRecord(domain string, name string, value string, ttl return err } - message := &apiResponse{} - err = d.do(req, message) + message := apiResponse{} + err = d.do(req, &message) if err != nil { return fmt.Errorf("unable to create TXT record for domain %s and name %s: %v", domain, name, err) } - if message != nil && len(message.Message) > 0 { + if len(message.Message) > 0 { log.Infof("API response: %s", message.Message) } @@ -87,13 +87,13 @@ func (d *DNSProvider) deleteTXTRecord(domain string, name string) error { return err } - message := &apiResponse{} - err = d.do(req, message) + message := apiResponse{} + err = d.do(req, &message) if err != nil { return fmt.Errorf("unable to delete TXT record for domain %s and name %s: %v", domain, name, err) } - if message != nil && len(message.Message) > 0 { + if len(message.Message) > 0 { log.Infof("API response: %s", message.Message) } diff --git a/vendor/github.com/go-acme/lego/providers/dns/gcloud/googlecloud.go b/vendor/github.com/go-acme/lego/providers/dns/gcloud/googlecloud.go index 8b3bab02b..2f3b6dff3 100644 --- 
a/vendor/github.com/go-acme/lego/providers/dns/gcloud/googlecloud.go +++ b/vendor/github.com/go-acme/lego/providers/dns/gcloud/googlecloud.go @@ -18,6 +18,7 @@ import ( "golang.org/x/oauth2/google" "google.golang.org/api/dns/v1" "google.golang.org/api/googleapi" + "google.golang.org/api/option" ) const ( @@ -139,8 +140,11 @@ func NewDNSProviderConfig(config *Config) (*DNSProvider, error) { if config == nil { return nil, errors.New("googlecloud: the configuration of the DNS provider is nil") } + if config.HTTPClient == nil { + return nil, fmt.Errorf("googlecloud: unable to create Google Cloud DNS service: client is nil") + } - svc, err := dns.New(config.HTTPClient) + svc, err := dns.NewService(context.Background(), option.WithHTTPClient(config.HTTPClient)) if err != nil { return nil, fmt.Errorf("googlecloud: unable to create Google Cloud DNS service: %v", err) } @@ -306,7 +310,13 @@ func (d *DNSProvider) getHostedZone(domain string) (string, error) { return "", fmt.Errorf("no matching domain found for domain %s", authZone) } - return zones.ManagedZones[0].Name, nil + for _, z := range zones.ManagedZones { + if z.Visibility == "public" { + return z.Name, nil + } + } + + return "", fmt.Errorf("no public zone found for domain %s", authZone) } func (d *DNSProvider) findTxtRecords(zone, fqdn string) ([]*dns.ResourceRecordSet, error) { diff --git a/vendor/github.com/go-acme/lego/providers/dns/sakuracloud/client.go b/vendor/github.com/go-acme/lego/providers/dns/sakuracloud/client.go new file mode 100644 index 000000000..10b8a1f68 --- /dev/null +++ b/vendor/github.com/go-acme/lego/providers/dns/sakuracloud/client.go @@ -0,0 +1,106 @@ +package sakuracloud + +import ( + "fmt" + "net/http" + "strings" + + "github.com/go-acme/lego/challenge/dns01" + "github.com/sacloud/libsacloud/api" + "github.com/sacloud/libsacloud/sacloud" +) + +const sacloudAPILockKey = "lego/dns/sacloud" + +func (d *DNSProvider) addTXTRecord(fqdn, domain, value string, ttl int) error { + 
sacloud.LockByKey(sacloudAPILockKey) + defer sacloud.UnlockByKey(sacloudAPILockKey) + + zone, err := d.getHostedZone(domain) + if err != nil { + return fmt.Errorf("sakuracloud: %v", err) + } + + name := d.extractRecordName(fqdn, zone.Name) + + zone.AddRecord(zone.CreateNewRecord(name, "TXT", value, ttl)) + _, err = d.client.Update(zone.ID, zone) + if err != nil { + return fmt.Errorf("sakuracloud: API call failed: %v", err) + } + + return nil +} + +func (d *DNSProvider) cleanupTXTRecord(fqdn, domain string) error { + sacloud.LockByKey(sacloudAPILockKey) + defer sacloud.UnlockByKey(sacloudAPILockKey) + + zone, err := d.getHostedZone(domain) + if err != nil { + return fmt.Errorf("sakuracloud: %v", err) + } + + records := d.findTxtRecords(fqdn, zone) + + for _, record := range records { + var updRecords []sacloud.DNSRecordSet + for _, r := range zone.Settings.DNS.ResourceRecordSets { + if !(r.Name == record.Name && r.Type == record.Type && r.RData == record.RData) { + updRecords = append(updRecords, r) + } + } + zone.Settings.DNS.ResourceRecordSets = updRecords + } + + _, err = d.client.Update(zone.ID, zone) + if err != nil { + return fmt.Errorf("sakuracloud: API call failed: %v", err) + } + return nil +} + +func (d *DNSProvider) getHostedZone(domain string) (*sacloud.DNS, error) { + authZone, err := dns01.FindZoneByFqdn(dns01.ToFqdn(domain)) + if err != nil { + return nil, err + } + + zoneName := dns01.UnFqdn(authZone) + + res, err := d.client.Reset().WithNameLike(zoneName).Find() + if err != nil { + if notFound, ok := err.(api.Error); ok && notFound.ResponseCode() == http.StatusNotFound { + return nil, fmt.Errorf("zone %s not found on SakuraCloud DNS: %v", zoneName, err) + } + return nil, fmt.Errorf("API call failed: %v", err) + } + + for _, zone := range res.CommonServiceDNSItems { + if zone.Name == zoneName { + return &zone, nil + } + } + + return nil, fmt.Errorf("zone %s not found", zoneName) +} + +func (d *DNSProvider) findTxtRecords(fqdn string, zone 
*sacloud.DNS) []sacloud.DNSRecordSet { + recordName := d.extractRecordName(fqdn, zone.Name) + + var res []sacloud.DNSRecordSet + for _, record := range zone.Settings.DNS.ResourceRecordSets { + if record.Name == recordName && record.Type == "TXT" { + res = append(res, record) + } + } + return res +} + +func (d *DNSProvider) extractRecordName(fqdn, domain string) string { + name := dns01.UnFqdn(fqdn) + if idx := strings.Index(name, "."+domain); idx != -1 { + return name[:idx] + } + return name +} diff --git a/vendor/github.com/go-acme/lego/providers/dns/sakuracloud/sakuracloud.go b/vendor/github.com/go-acme/lego/providers/dns/sakuracloud/sakuracloud.go index a7ffc81e3..87f55a0e5 100644 --- a/vendor/github.com/go-acme/lego/providers/dns/sakuracloud/sakuracloud.go +++ b/vendor/github.com/go-acme/lego/providers/dns/sakuracloud/sakuracloud.go @@ -5,13 +5,11 @@ import ( "errors" "fmt" "net/http" - "strings" "time" "github.com/go-acme/lego/challenge/dns01" "github.com/go-acme/lego/platform/config/env" "github.com/sacloud/libsacloud/api" - "github.com/sacloud/libsacloud/sacloud" ) // Config is used to configure the creation of the DNSProvider @@ -21,6 +19,7 @@ type Config struct { PropagationTimeout time.Duration PollingInterval time.Duration TTL int + HTTPClient *http.Client } // NewDefaultConfig returns a default configuration for the DNSProvider @@ -29,13 +28,16 @@ func NewDefaultConfig() *Config { TTL: env.GetOrDefaultInt("SAKURACLOUD_TTL", dns01.DefaultTTL), PropagationTimeout: env.GetOrDefaultSecond("SAKURACLOUD_PROPAGATION_TIMEOUT", dns01.DefaultPropagationTimeout), PollingInterval: env.GetOrDefaultSecond("SAKURACLOUD_POLLING_INTERVAL", dns01.DefaultPollingInterval), + HTTPClient: &http.Client{ + Timeout: env.GetOrDefaultSecond("SAKURACLOUD_HTTP_TIMEOUT", 10*time.Second), + }, } } // DNSProvider is an implementation of the acme.ChallengeProvider interface. 
type DNSProvider struct { config *Config - client *api.Client + client *api.DNSAPI } // NewDNSProvider returns a DNSProvider instance configured for SakuraCloud. @@ -67,58 +69,29 @@ func NewDNSProviderConfig(config *Config) (*DNSProvider, error) { return nil, errors.New("sakuracloud: AccessSecret is missing") } - client := api.NewClient(config.Token, config.Secret, "tk1a") + apiClient := api.NewClient(config.Token, config.Secret, "is1a") + if config.HTTPClient == nil { + apiClient.HTTPClient = http.DefaultClient + } else { + apiClient.HTTPClient = config.HTTPClient + } - return &DNSProvider{client: client, config: config}, nil + return &DNSProvider{ + client: apiClient.GetDNSAPI(), + config: config, + }, nil } // Present creates a TXT record to fulfill the dns-01 challenge. func (d *DNSProvider) Present(domain, token, keyAuth string) error { fqdn, value := dns01.GetRecord(domain, keyAuth) - - zone, err := d.getHostedZone(domain) - if err != nil { - return fmt.Errorf("sakuracloud: %v", err) - } - - name := d.extractRecordName(fqdn, zone.Name) - - zone.AddRecord(zone.CreateNewRecord(name, "TXT", value, d.config.TTL)) - _, err = d.client.GetDNSAPI().Update(zone.ID, zone) - if err != nil { - return fmt.Errorf("sakuracloud: API call failed: %v", err) - } - - return nil + return d.addTXTRecord(fqdn, domain, value, d.config.TTL) } // CleanUp removes the TXT record matching the specified parameters. 
func (d *DNSProvider) CleanUp(domain, token, keyAuth string) error { fqdn, _ := dns01.GetRecord(domain, keyAuth) - - zone, err := d.getHostedZone(domain) - if err != nil { - return fmt.Errorf("sakuracloud: %v", err) - } - - records := d.findTxtRecords(fqdn, zone) - - for _, record := range records { - var updRecords []sacloud.DNSRecordSet - for _, r := range zone.Settings.DNS.ResourceRecordSets { - if !(r.Name == record.Name && r.Type == record.Type && r.RData == record.RData) { - updRecords = append(updRecords, r) - } - } - zone.Settings.DNS.ResourceRecordSets = updRecords - } - - _, err = d.client.GetDNSAPI().Update(zone.ID, zone) - if err != nil { - return fmt.Errorf("sakuracloud: API call failed: %v", err) - } - - return nil + return d.cleanupTXTRecord(fqdn, domain) } // Timeout returns the timeout and interval to use when checking for DNS propagation. @@ -126,48 +99,3 @@ func (d *DNSProvider) CleanUp(domain, token, keyAuth string) error { func (d *DNSProvider) Timeout() (timeout, interval time.Duration) { return d.config.PropagationTimeout, d.config.PollingInterval } - -func (d *DNSProvider) getHostedZone(domain string) (*sacloud.DNS, error) { - authZone, err := dns01.FindZoneByFqdn(dns01.ToFqdn(domain)) - if err != nil { - return nil, err - } - - zoneName := dns01.UnFqdn(authZone) - - res, err := d.client.GetDNSAPI().WithNameLike(zoneName).Find() - if err != nil { - if notFound, ok := err.(api.Error); ok && notFound.ResponseCode() == http.StatusNotFound { - return nil, fmt.Errorf("zone %s not found on SakuraCloud DNS: %v", zoneName, err) - } - return nil, fmt.Errorf("API call failed: %v", err) - } - - for _, zone := range res.CommonServiceDNSItems { - if zone.Name == zoneName { - return &zone, nil - } - } - - return nil, fmt.Errorf("zone %s not found", zoneName) -} - -func (d *DNSProvider) findTxtRecords(fqdn string, zone *sacloud.DNS) []sacloud.DNSRecordSet { - recordName := d.extractRecordName(fqdn, zone.Name) - - var res []sacloud.DNSRecordSet - for _, 
record := range zone.Settings.DNS.ResourceRecordSets { - if record.Name == recordName && record.Type == "TXT" { - res = append(res, record) - } - } - return res -} - -func (d *DNSProvider) extractRecordName(fqdn, domain string) string { - name := dns01.UnFqdn(fqdn) - if idx := strings.Index(name, "."+domain); idx != -1 { - return name[:idx] - } - return name -} diff --git a/vendor/github.com/sacloud/libsacloud/api/archive.go b/vendor/github.com/sacloud/libsacloud/api/archive.go index c752adb64..be13faa7e 100644 --- a/vendor/github.com/sacloud/libsacloud/api/archive.go +++ b/vendor/github.com/sacloud/libsacloud/api/archive.go @@ -2,10 +2,11 @@ package api import ( "fmt" - "github.com/sacloud/libsacloud/sacloud" - "github.com/sacloud/libsacloud/sacloud/ostype" "strings" "time" + + "github.com/sacloud/libsacloud/sacloud" + "github.com/sacloud/libsacloud/sacloud/ostype" ) // ArchiveAPI アーカイブAPI @@ -15,25 +16,30 @@ type ArchiveAPI struct { } var ( - archiveLatestStableCentOSTags = []string{"current-stable", "distro-centos"} - archiveLatestStableCentOS6Tags = []string{"distro-centos", "distro-ver-6.9"} - archiveLatestStableUbuntuTags = []string{"current-stable", "distro-ubuntu"} - archiveLatestStableDebianTags = []string{"current-stable", "distro-debian"} - archiveLatestStableVyOSTags = []string{"current-stable", "distro-vyos"} - archiveLatestStableCoreOSTags = []string{"current-stable", "distro-coreos"} - archiveLatestStableRancherOSTags = []string{"current-stable", "distro-rancheros"} - archiveLatestStableKusanagiTags = []string{"current-stable", "pkg-kusanagi"} - archiveLatestStableSophosUTMTags = []string{"current-stable", "pkg-sophosutm"} - archiveLatestStableFreeBSDTags = []string{"current-stable", "distro-freebsd"} - archiveLatestStableWindows2012Tags = []string{"os-windows", "distro-ver-2012.2"} - archiveLatestStableWindows2012RDSTags = []string{"os-windows", "distro-ver-2012.2", "windows-rds"} - archiveLatestStableWindows2012RDSOfficeTags = 
[]string{"os-windows", "distro-ver-2012.2", "windows-rds", "with-office"} - archiveLatestStableWindows2016Tags = []string{"os-windows", "distro-ver-2016"} - archiveLatestStableWindows2016RDSTags = []string{"os-windows", "distro-ver-2016", "windows-rds"} - archiveLatestStableWindows2016RDSOfficeTags = []string{"os-windows", "distro-ver-2016", "windows-rds", "with-office"} - archiveLatestStableWindows2016SQLServerWeb = []string{"os-windows", "distro-ver-2016", "windows-sqlserver", "sqlserver-2016", "edition-web"} - archiveLatestStableWindows2016SQLServerStandard = []string{"os-windows", "distro-ver-2016", "windows-sqlserver", "sqlserver-2016", "edition-standard"} - archiveLatestStableWindows2016SQLServerStandardAll = []string{"os-windows", "distro-ver-2016", "windows-sqlserver", "sqlserver-2016", "edition-standard", "windows-rds", "with-office"} + archiveLatestStableCentOSTags = []string{"current-stable", "distro-centos"} + archiveLatestStableCentOS6Tags = []string{"distro-centos", "distro-ver-6.10"} + archiveLatestStableUbuntuTags = []string{"current-stable", "distro-ubuntu"} + archiveLatestStableDebianTags = []string{"current-stable", "distro-debian"} + archiveLatestStableVyOSTags = []string{"current-stable", "distro-vyos"} + archiveLatestStableCoreOSTags = []string{"current-stable", "distro-coreos"} + archiveLatestStableRancherOSTags = []string{"current-stable", "distro-rancheros"} + archiveLatestStableKusanagiTags = []string{"current-stable", "pkg-kusanagi"} + archiveLatestStableSophosUTMTags = []string{"current-stable", "pkg-sophosutm"} + archiveLatestStableFreeBSDTags = []string{"current-stable", "distro-freebsd"} + archiveLatestStableNetwiserTags = []string{"current-stable", "pkg-netwiserve"} + archiveLatestStableOPNsenseTags = []string{"current-stable", "distro-opnsense"} + archiveLatestStableWindows2012Tags = []string{"os-windows", "distro-ver-2012.2"} + archiveLatestStableWindows2012RDSTags = []string{"os-windows", "distro-ver-2012.2", "windows-rds"} + 
archiveLatestStableWindows2012RDSOfficeTags = []string{"os-windows", "distro-ver-2012.2", "windows-rds", "with-office"} + archiveLatestStableWindows2016Tags = []string{"os-windows", "distro-ver-2016"} + archiveLatestStableWindows2016RDSTags = []string{"os-windows", "distro-ver-2016", "windows-rds"} + archiveLatestStableWindows2016RDSOfficeTags = []string{"os-windows", "distro-ver-2016", "windows-rds", "with-office"} + archiveLatestStableWindows2016SQLServerWeb = []string{"os-windows", "distro-ver-2016", "windows-sqlserver", "sqlserver-2016", "edition-web"} + archiveLatestStableWindows2016SQLServerStandard = []string{"os-windows", "distro-ver-2016", "windows-sqlserver", "sqlserver-2016", "edition-standard"} + archiveLatestStableWindows2016SQLServer2017Standard = []string{"os-windows", "distro-ver-2016", "windows-sqlserver", "sqlserver-2017", "edition-standard"} + archiveLatestStableWindows2016SQLServerStandardAll = []string{"os-windows", "distro-ver-2016", "windows-sqlserver", "sqlserver-2016", "edition-standard", "windows-rds", "with-office"} + archiveLatestStableWindows2016SQLServer2017StandardAll = []string{"os-windows", "distro-ver-2016", "windows-sqlserver", "sqlserver-2017", "edition-standard", "windows-rds", "with-office"} + archiveLatestStableWindows2019Tags = []string{"os-windows", "distro-ver-2019"} ) // NewArchiveAPI アーカイブAPI作成 @@ -48,25 +54,29 @@ func NewArchiveAPI(client *Client) *ArchiveAPI { } api.findFuncMapPerOSType = map[ostype.ArchiveOSTypes]func() (*sacloud.Archive, error){ - ostype.CentOS: api.FindLatestStableCentOS, - ostype.CentOS6: api.FindLatestStableCentOS6, - ostype.Ubuntu: api.FindLatestStableUbuntu, - ostype.Debian: api.FindLatestStableDebian, - ostype.VyOS: api.FindLatestStableVyOS, - ostype.CoreOS: api.FindLatestStableCoreOS, - ostype.RancherOS: api.FindLatestStableRancherOS, - ostype.Kusanagi: api.FindLatestStableKusanagi, - ostype.SophosUTM: api.FindLatestStableSophosUTM, - ostype.FreeBSD: api.FindLatestStableFreeBSD, - 
ostype.Windows2012: api.FindLatestStableWindows2012, - ostype.Windows2012RDS: api.FindLatestStableWindows2012RDS, - ostype.Windows2012RDSOffice: api.FindLatestStableWindows2012RDSOffice, - ostype.Windows2016: api.FindLatestStableWindows2016, - ostype.Windows2016RDS: api.FindLatestStableWindows2016RDS, - ostype.Windows2016RDSOffice: api.FindLatestStableWindows2016RDSOffice, - ostype.Windows2016SQLServerWeb: api.FindLatestStableWindows2016SQLServerWeb, - ostype.Windows2016SQLServerStandard: api.FindLatestStableWindows2016SQLServerStandard, - ostype.Windows2016SQLServerStandardAll: api.FindLatestStableWindows2016SQLServerStandardAll, + ostype.CentOS: api.FindLatestStableCentOS, + ostype.CentOS6: api.FindLatestStableCentOS6, + ostype.Ubuntu: api.FindLatestStableUbuntu, + ostype.Debian: api.FindLatestStableDebian, + ostype.VyOS: api.FindLatestStableVyOS, + ostype.CoreOS: api.FindLatestStableCoreOS, + ostype.RancherOS: api.FindLatestStableRancherOS, + ostype.Kusanagi: api.FindLatestStableKusanagi, + ostype.SophosUTM: api.FindLatestStableSophosUTM, + ostype.FreeBSD: api.FindLatestStableFreeBSD, + ostype.Netwiser: api.FindLatestStableNetwiser, + ostype.OPNsense: api.FindLatestStableOPNsense, + ostype.Windows2012: api.FindLatestStableWindows2012, + ostype.Windows2012RDS: api.FindLatestStableWindows2012RDS, + ostype.Windows2012RDSOffice: api.FindLatestStableWindows2012RDSOffice, + ostype.Windows2016: api.FindLatestStableWindows2016, + ostype.Windows2016RDS: api.FindLatestStableWindows2016RDS, + ostype.Windows2016RDSOffice: api.FindLatestStableWindows2016RDSOffice, + ostype.Windows2016SQLServerWeb: api.FindLatestStableWindows2016SQLServerWeb, + ostype.Windows2016SQLServerStandard: api.FindLatestStableWindows2016SQLServerStandard, + ostype.Windows2016SQLServer2017Standard: api.FindLatestStableWindows2016SQLServer2017Standard, + ostype.Windows2016SQLServerStandardAll: api.FindLatestStableWindows2016SQLServerStandardAll, + ostype.Windows2016SQLServer2017StandardAll: 
api.FindLatestStableWindows2016SQLServer2017StandardAll, } return api @@ -137,6 +147,14 @@ func (api *ArchiveAPI) CanEditDisk(id int64) (bool, error) { if archive.HasTag("pkg-sophosutm") || archive.IsSophosUTM() { return false, nil } + // OPNsenseであれば編集不可 + if archive.HasTag("distro-opnsense") { + return false, nil + } + // Netwiser VEであれば編集不可 + if archive.HasTag("pkg-netwiserve") { + return false, nil + } for _, t := range allowDiskEditTags { if archive.HasTag(t) { @@ -180,6 +198,14 @@ func (api *ArchiveAPI) GetPublicArchiveIDFromAncestors(id int64) (int64, bool) { if archive.HasTag("pkg-sophosutm") || archive.IsSophosUTM() { return emptyID, false } + // OPNsenseであれば編集不可 + if archive.HasTag("distro-opnsense") { + return emptyID, false + } + // Netwiser VEであれば編集不可 + if archive.HasTag("pkg-netwiserve") { + return emptyID, false + } for _, t := range allowDiskEditTags { if archive.HasTag(t) { @@ -249,6 +275,16 @@ func (api *ArchiveAPI) FindLatestStableFreeBSD() (*sacloud.Archive, error) { return api.findByOSTags(archiveLatestStableFreeBSDTags) } +// FindLatestStableNetwiser 安定版最新のNetwiserパブリックアーカイブを取得 +func (api *ArchiveAPI) FindLatestStableNetwiser() (*sacloud.Archive, error) { + return api.findByOSTags(archiveLatestStableNetwiserTags) +} + +// FindLatestStableOPNsense 安定版最新のOPNsenseパブリックアーカイブを取得 +func (api *ArchiveAPI) FindLatestStableOPNsense() (*sacloud.Archive, error) { + return api.findByOSTags(archiveLatestStableOPNsenseTags) +} + // FindLatestStableWindows2012 安定版最新のWindows2012パブリックアーカイブを取得 func (api *ArchiveAPI) FindLatestStableWindows2012() (*sacloud.Archive, error) { return api.findByOSTags(archiveLatestStableWindows2012Tags, map[string]interface{}{ @@ -305,13 +341,34 @@ func (api *ArchiveAPI) FindLatestStableWindows2016SQLServerStandard() (*sacloud. 
}) } +// FindLatestStableWindows2016SQLServer2017Standard 安定版最新のWindows2016 SQLServer2017(Standard) パブリックアーカイブを取得 +func (api *ArchiveAPI) FindLatestStableWindows2016SQLServer2017Standard() (*sacloud.Archive, error) { + return api.findByOSTags(archiveLatestStableWindows2016SQLServer2017Standard, map[string]interface{}{ + "Name": "Windows Server 2016 for MS SQL 2017(Standard)", + }) +} + // FindLatestStableWindows2016SQLServerStandardAll 安定版最新のWindows2016 SQLServer(RDS+Office) パブリックアーカイブを取得 func (api *ArchiveAPI) FindLatestStableWindows2016SQLServerStandardAll() (*sacloud.Archive, error) { - return api.findByOSTags(archiveLatestStableWindows2016SQLServerStandard, map[string]interface{}{ + return api.findByOSTags(archiveLatestStableWindows2016SQLServerStandardAll, map[string]interface{}{ "Name": "Windows Server 2016 for MS SQL 2016(Std) with RDS / MS Office", }) } +// FindLatestStableWindows2016SQLServer2017StandardAll 安定版最新のWindows2016 SQLServer2017(RDS+Office) パブリックアーカイブを取得 +func (api *ArchiveAPI) FindLatestStableWindows2016SQLServer2017StandardAll() (*sacloud.Archive, error) { + return api.findByOSTags(archiveLatestStableWindows2016SQLServer2017StandardAll, map[string]interface{}{ + "Name": "Windows Server 2016 for MS SQL 2017(Std) with RDS / MS Office", + }) +} + +// FindLatestStableWindows2019 安定版最新のWindows2019パブリックアーカイブを取得 +func (api *ArchiveAPI) FindLatestStableWindows2019() (*sacloud.Archive, error) { + return api.findByOSTags(archiveLatestStableWindows2019Tags, map[string]interface{}{ + "Name": "Windows Server 2019 Datacenter Edition", + }) +} + // FindByOSType 指定のOS種別の安定版最新のパブリックアーカイブを取得 func (api *ArchiveAPI) FindByOSType(os ostype.ArchiveOSTypes) (*sacloud.Archive, error) { if f, ok := api.findFuncMapPerOSType[os]; ok { diff --git a/vendor/github.com/sacloud/libsacloud/api/auth_status.go b/vendor/github.com/sacloud/libsacloud/api/auth_status.go index d9fd2c73d..e1d9621b2 100644 --- a/vendor/github.com/sacloud/libsacloud/api/auth_status.go +++ 
b/vendor/github.com/sacloud/libsacloud/api/auth_status.go @@ -2,6 +2,7 @@ package api import ( "encoding/json" + "github.com/sacloud/libsacloud/sacloud" ) diff --git a/vendor/github.com/sacloud/libsacloud/api/auto_backup.go b/vendor/github.com/sacloud/libsacloud/api/auto_backup.go index d9944e099..2bd237ab6 100644 --- a/vendor/github.com/sacloud/libsacloud/api/auto_backup.go +++ b/vendor/github.com/sacloud/libsacloud/api/auto_backup.go @@ -1,8 +1,8 @@ package api import ( - "encoding/json" - // "strings" + "encoding/json" // "strings" + "github.com/sacloud/libsacloud/sacloud" ) diff --git a/vendor/github.com/sacloud/libsacloud/api/base_api.go b/vendor/github.com/sacloud/libsacloud/api/base_api.go index a071869ce..a0a25f2c2 100644 --- a/vendor/github.com/sacloud/libsacloud/api/base_api.go +++ b/vendor/github.com/sacloud/libsacloud/api/base_api.go @@ -3,8 +3,9 @@ package api import ( "encoding/json" "fmt" - "github.com/sacloud/libsacloud/sacloud" "net/url" + + "github.com/sacloud/libsacloud/sacloud" ) type baseAPI struct { @@ -137,7 +138,7 @@ func (api *baseAPI) filterBy(key string, value interface{}, multiple bool) *base if f, ok := state.Filter[key]; ok { if s, ok := f.(string); ok && s != "" { if v, ok := value.(string); ok { - state.Filter[key] = fmt.Sprintf("%s %s", s, v) + state.Filter[key] = fmt.Sprintf("%s%%20%s", s, v) return } } diff --git a/vendor/github.com/sacloud/libsacloud/api/bill.go b/vendor/github.com/sacloud/libsacloud/api/bill.go index 2caba0b25..caf252dc8 100644 --- a/vendor/github.com/sacloud/libsacloud/api/bill.go +++ b/vendor/github.com/sacloud/libsacloud/api/bill.go @@ -4,10 +4,11 @@ import ( "encoding/csv" "encoding/json" "fmt" - "github.com/sacloud/libsacloud/sacloud" "io" "strings" "time" + + "github.com/sacloud/libsacloud/sacloud" ) // BillAPI 請求情報API diff --git a/vendor/github.com/sacloud/libsacloud/api/cdrom.go b/vendor/github.com/sacloud/libsacloud/api/cdrom.go index 5bda66b97..49a479c66 100644 --- 
a/vendor/github.com/sacloud/libsacloud/api/cdrom.go +++ b/vendor/github.com/sacloud/libsacloud/api/cdrom.go @@ -2,8 +2,9 @@ package api import ( "fmt" - "github.com/sacloud/libsacloud/sacloud" "time" + + "github.com/sacloud/libsacloud/sacloud" ) // CDROMAPI ISOイメージAPI diff --git a/vendor/github.com/sacloud/libsacloud/api/client.go b/vendor/github.com/sacloud/libsacloud/api/client.go index f3ca5a9af..19977429c 100644 --- a/vendor/github.com/sacloud/libsacloud/api/client.go +++ b/vendor/github.com/sacloud/libsacloud/api/client.go @@ -4,14 +4,15 @@ import ( "bytes" "encoding/json" "fmt" - "github.com/sacloud/libsacloud" - "github.com/sacloud/libsacloud/sacloud" "io" "io/ioutil" "log" "net/http" "strings" "time" + + "github.com/sacloud/libsacloud" + "github.com/sacloud/libsacloud/sacloud" ) var ( @@ -44,6 +45,8 @@ type Client struct { RetryMax int // 503エラー時のリトライ待ち時間 RetryInterval time.Duration + // APIコール時に利用される*http.Client 未指定の場合http.DefaultClientが利用される + HTTPClient *http.Client } // NewClient APIクライアント作成 @@ -73,8 +76,11 @@ func (c *Client) Clone() *Client { DefaultTimeoutDuration: c.DefaultTimeoutDuration, UserAgent: c.UserAgent, AcceptLanguage: c.AcceptLanguage, + RequestTracer: c.RequestTracer, + ResponseTracer: c.ResponseTracer, RetryMax: c.RetryMax, RetryInterval: c.RetryInterval, + HTTPClient: c.HTTPClient, } n.API = newAPI(n) return n @@ -111,6 +117,7 @@ func (c *Client) isOkStatus(code int) bool { func (c *Client) newRequest(method, uri string, body interface{}) ([]byte, error) { var ( client = &retryableHTTPClient{ + Client: c.HTTPClient, retryMax: c.RetryMax, retryInterval: c.RetryInterval, } @@ -232,12 +239,15 @@ func newRequest(method, url string, body io.ReadSeeker) (*request, error) { } type retryableHTTPClient struct { - http.Client + *http.Client retryInterval time.Duration retryMax int } func (c *retryableHTTPClient) Do(req *request) (*http.Response, error) { + if c.Client == nil { + c.Client = http.DefaultClient + } for i := 0; ; i++ { if req.body 
!= nil { @@ -277,6 +287,7 @@ type API struct { Bill *BillAPI // 請求情報API Bridge *BridgeAPI // ブリッジAPi CDROM *CDROMAPI // ISOイメージAPI + Coupon *CouponAPI // クーポンAPI Database *DatabaseAPI // データベースAPI Disk *DiskAPI // ディスクAPI DNS *DNSAPI // DNS API @@ -295,6 +306,7 @@ type API struct { NFS *NFSAPI // NFS API Note *NoteAPI // スタートアップスクリプトAPI PacketFilter *PacketFilterAPI // パケットフィルタAPI + ProxyLB *ProxyLBAPI // プロキシLBAPI PrivateHost *PrivateHostAPI // 専有ホストAPI Product *ProductAPI // 製品情報API Server *ServerAPI // サーバーAPI @@ -337,6 +349,11 @@ func (api *API) GetCDROMAPI() *CDROMAPI { return api.CDROM } +// GetCouponAPI クーポン情報API取得 +func (api *API) GetCouponAPI() *CouponAPI { + return api.Coupon +} + // GetDatabaseAPI データベースAPI取得 func (api *API) GetDatabaseAPI() *DatabaseAPI { return api.Database @@ -432,6 +449,11 @@ func (api *API) GetPacketFilterAPI() *PacketFilterAPI { return api.PacketFilter } +// GetProxyLBAPI プロキシLBAPI取得 +func (api *API) GetProxyLBAPI() *ProxyLBAPI { + return api.ProxyLB +} + // GetPrivateHostAPI 専有ホストAPI取得 func (api *API) GetPrivateHostAPI() *PrivateHostAPI { return api.PrivateHost @@ -566,6 +588,7 @@ func newAPI(client *Client) *API { Bill: NewBillAPI(client), Bridge: NewBridgeAPI(client), CDROM: NewCDROMAPI(client), + Coupon: NewCouponAPI(client), Database: NewDatabaseAPI(client), Disk: NewDiskAPI(client), DNS: NewDNSAPI(client), @@ -587,6 +610,7 @@ func newAPI(client *Client) *API { NFS: NewNFSAPI(client), Note: NewNoteAPI(client), PacketFilter: NewPacketFilterAPI(client), + ProxyLB: NewProxyLBAPI(client), PrivateHost: NewPrivateHostAPI(client), Product: &ProductAPI{ Server: NewProductServerAPI(client), diff --git a/vendor/github.com/sacloud/libsacloud/api/coupon.go b/vendor/github.com/sacloud/libsacloud/api/coupon.go new file mode 100644 index 000000000..6dcfb84f8 --- /dev/null +++ b/vendor/github.com/sacloud/libsacloud/api/coupon.go @@ -0,0 +1,59 @@ +package api + +import ( + "encoding/json" + "fmt" + + "github.com/sacloud/libsacloud/sacloud" +) 
+ +// CouponAPI クーポン情報API +type CouponAPI struct { + *baseAPI +} + +// NewCouponAPI クーポン情報API作成 +func NewCouponAPI(client *Client) *CouponAPI { + return &CouponAPI{ + &baseAPI{ + client: client, + apiRootSuffix: sakuraBillingAPIRootSuffix, + FuncGetResourceURL: func() string { + return "coupon" + }, + }, + } +} + +// CouponResponse クーポン情報レスポンス +type CouponResponse struct { + *sacloud.ResultFlagValue + // AllCount 件数 + AllCount int `json:",omitempty"` + // CountPerPage ページあたり件数 + CountPerPage int `json:",omitempty"` + // Page 現在のページ番号 + Page int `json:",omitempty"` + // Coupons クーポン情報 リスト + Coupons []*sacloud.Coupon +} + +// Find クーポン情報 全件取得 +func (api *CouponAPI) Find() ([]*sacloud.Coupon, error) { + authStatus, err := api.client.AuthStatus.Read() + if err != nil { + return nil, err + } + accountID := authStatus.Account.GetStrID() + + uri := fmt.Sprintf("%s/%s", api.getResourceURL(), accountID) + data, err := api.client.newRequest("GET", uri, nil) + if err != nil { + return nil, err + } + var res CouponResponse + if err := json.Unmarshal(data, &res); err != nil { + return nil, err + } + return res.Coupons, nil +} diff --git a/vendor/github.com/sacloud/libsacloud/api/database.go b/vendor/github.com/sacloud/libsacloud/api/database.go index 9ebe9a148..5efcb2f79 100644 --- a/vendor/github.com/sacloud/libsacloud/api/database.go +++ b/vendor/github.com/sacloud/libsacloud/api/database.go @@ -3,8 +3,9 @@ package api import ( "encoding/json" "fmt" - "github.com/sacloud/libsacloud/sacloud" "time" + + "github.com/sacloud/libsacloud/sacloud" ) //HACK: さくらのAPI側仕様: Applianceの内容によってJSONフォーマットが異なるため diff --git a/vendor/github.com/sacloud/libsacloud/api/disk.go b/vendor/github.com/sacloud/libsacloud/api/disk.go index a90a4b00c..fa39a22f3 100644 --- a/vendor/github.com/sacloud/libsacloud/api/disk.go +++ b/vendor/github.com/sacloud/libsacloud/api/disk.go @@ -2,8 +2,9 @@ package api import ( "fmt" - "github.com/sacloud/libsacloud/sacloud" "time" + + 
"github.com/sacloud/libsacloud/sacloud" ) var ( @@ -56,7 +57,50 @@ func (api *DiskAPI) Create(value *sacloud.Disk) (*sacloud.Disk, error) { Success string `json:",omitempty"` } res := &diskResponse{} - err := api.create(api.createRequest(value), res) + + rawBody := &sacloud.Request{} + rawBody.Disk = value + if len(value.DistantFrom) > 0 { + rawBody.DistantFrom = value.DistantFrom + value.DistantFrom = []int64{} + } + + err := api.create(rawBody, res) + if err != nil { + return nil, err + } + return res.Disk, nil +} + +// CreateWithConfig ディスク作成とディスクの修正、サーバ起動(指定されていれば)を1回のAPI呼び出しで実行 +func (api *DiskAPI) CreateWithConfig(value *sacloud.Disk, config *sacloud.DiskEditValue, bootAtAvailable bool) (*sacloud.Disk, error) { + //HACK: さくらのAPI側仕様: 戻り値:Successがbool値へ変換できないため文字列で受ける("Accepted"などが返る) + type diskResponse struct { + *sacloud.Response + // Success + Success string `json:",omitempty"` + } + res := &diskResponse{} + + type diskRequest struct { + *sacloud.Request + Config *sacloud.DiskEditValue `json:",omitempty"` + BootAtAvailable bool `json:",omitempty"` + } + + rawBody := &diskRequest{ + Request: &sacloud.Request{}, + BootAtAvailable: bootAtAvailable, + } + rawBody.Disk = value + rawBody.Config = config + + if len(value.DistantFrom) > 0 { + rawBody.DistantFrom = value.DistantFrom + value.DistantFrom = []int64{} + } + + err := api.create(rawBody, res) if err != nil { return nil, err } @@ -90,7 +134,14 @@ func (api *DiskAPI) install(id int64, body *sacloud.Disk) (bool, error) { Success string `json:",omitempty"` } res := &diskResponse{} - err := api.baseAPI.request(method, uri, body, res) + rawBody := &sacloud.Request{} + rawBody.Disk = body + if len(body.DistantFrom) > 0 { + rawBody.DistantFrom = body.DistantFrom + body.DistantFrom = []int64{} + } + + err := api.baseAPI.request(method, uri, rawBody, res) if err != nil { return false, err } @@ -213,6 +264,14 @@ func (api *DiskAPI) CanEditDisk(id int64) (bool, error) { if disk.HasTag("pkg-sophosutm") || 
disk.IsSophosUTM() { return false, nil } + // OPNsenseであれば編集不可 + if disk.HasTag("distro-opnsense") { + return false, nil + } + // Netwiser VEであれば編集不可 + if disk.HasTag("pkg-netwiserve") { + return false, nil + } // ソースアーカイブ/ソースディスクともに持っていない場合 if disk.SourceArchive == nil && disk.SourceDisk == nil { @@ -263,6 +322,14 @@ func (api *DiskAPI) GetPublicArchiveIDFromAncestors(id int64) (int64, bool) { if disk.HasTag("pkg-sophosutm") || disk.IsSophosUTM() { return emptyID, false } + // OPNsenseであれば編集不可 + if disk.HasTag("distro-opnsense") { + return emptyID, false + } + // Netwiser VEであれば編集不可 + if disk.HasTag("pkg-netwiserve") { + return emptyID, false + } for _, t := range allowDiskEditTags { if disk.HasTag(t) { diff --git a/vendor/github.com/sacloud/libsacloud/api/dns.go b/vendor/github.com/sacloud/libsacloud/api/dns.go index 3483a8355..b81e9612d 100644 --- a/vendor/github.com/sacloud/libsacloud/api/dns.go +++ b/vendor/github.com/sacloud/libsacloud/api/dns.go @@ -2,8 +2,9 @@ package api import ( "encoding/json" - "github.com/sacloud/libsacloud/sacloud" "strings" + + "github.com/sacloud/libsacloud/sacloud" ) //HACK: さくらのAPI側仕様: CommonServiceItemsの内容によってJSONフォーマットが異なるため diff --git a/vendor/github.com/sacloud/libsacloud/api/error.go b/vendor/github.com/sacloud/libsacloud/api/error.go index 5e0ca2a86..366dd3f91 100644 --- a/vendor/github.com/sacloud/libsacloud/api/error.go +++ b/vendor/github.com/sacloud/libsacloud/api/error.go @@ -2,6 +2,7 @@ package api import ( "fmt" + "github.com/sacloud/libsacloud/sacloud" ) diff --git a/vendor/github.com/sacloud/libsacloud/api/gslb.go b/vendor/github.com/sacloud/libsacloud/api/gslb.go index bce4cc2dd..976f297ca 100644 --- a/vendor/github.com/sacloud/libsacloud/api/gslb.go +++ b/vendor/github.com/sacloud/libsacloud/api/gslb.go @@ -1,8 +1,8 @@ package api import ( - "encoding/json" - // "strings" + "encoding/json" // "strings" + "github.com/sacloud/libsacloud/sacloud" ) diff --git a/vendor/github.com/sacloud/libsacloud/api/interface.go 
b/vendor/github.com/sacloud/libsacloud/api/interface.go index 21282c6c2..bcfa1bf05 100644 --- a/vendor/github.com/sacloud/libsacloud/api/interface.go +++ b/vendor/github.com/sacloud/libsacloud/api/interface.go @@ -2,6 +2,7 @@ package api import ( "fmt" + "github.com/sacloud/libsacloud/sacloud" ) @@ -81,3 +82,26 @@ func (api *InterfaceAPI) DisconnectFromPacketFilter(interfaceID int64) (bool, er ) return api.modify(method, uri, nil) } + +// SetDisplayIPAddress 表示用IPアドレス 設定 +func (api *InterfaceAPI) SetDisplayIPAddress(interfaceID int64, ipaddress string) (bool, error) { + var ( + method = "PUT" + uri = fmt.Sprintf("/%s/%d", api.getResourceURL(), interfaceID) + ) + body := map[string]interface{}{ + "Interface": map[string]string{ + "UserIPAddress": ipaddress, + }, + } + return api.modify(method, uri, body) +} + +// DeleteDisplayIPAddress 表示用IPアドレス 削除 +func (api *InterfaceAPI) DeleteDisplayIPAddress(interfaceID int64) (bool, error) { + var ( + method = "DELETE" + uri = fmt.Sprintf("/%s/%d", api.getResourceURL(), interfaceID) + ) + return api.modify(method, uri, nil) +} diff --git a/vendor/github.com/sacloud/libsacloud/api/internet.go b/vendor/github.com/sacloud/libsacloud/api/internet.go index 015134143..64a21fd59 100644 --- a/vendor/github.com/sacloud/libsacloud/api/internet.go +++ b/vendor/github.com/sacloud/libsacloud/api/internet.go @@ -2,8 +2,9 @@ package api import ( "fmt" - "github.com/sacloud/libsacloud/sacloud" "time" + + "github.com/sacloud/libsacloud/sacloud" ) // InternetAPI ルーターAPI diff --git a/vendor/github.com/sacloud/libsacloud/api/ipaddress.go b/vendor/github.com/sacloud/libsacloud/api/ipaddress.go index 3de95259c..42a07b6d8 100644 --- a/vendor/github.com/sacloud/libsacloud/api/ipaddress.go +++ b/vendor/github.com/sacloud/libsacloud/api/ipaddress.go @@ -2,6 +2,7 @@ package api import ( "fmt" + "github.com/sacloud/libsacloud/sacloud" ) diff --git a/vendor/github.com/sacloud/libsacloud/api/ipv6addr.go 
b/vendor/github.com/sacloud/libsacloud/api/ipv6addr.go index 7186d3e05..5884742b2 100644 --- a/vendor/github.com/sacloud/libsacloud/api/ipv6addr.go +++ b/vendor/github.com/sacloud/libsacloud/api/ipv6addr.go @@ -2,6 +2,7 @@ package api import ( "fmt" + "github.com/sacloud/libsacloud/sacloud" ) diff --git a/vendor/github.com/sacloud/libsacloud/api/load_balancer.go b/vendor/github.com/sacloud/libsacloud/api/load_balancer.go index c5873c696..bd942830e 100644 --- a/vendor/github.com/sacloud/libsacloud/api/load_balancer.go +++ b/vendor/github.com/sacloud/libsacloud/api/load_balancer.go @@ -3,8 +3,9 @@ package api import ( "encoding/json" "fmt" - "github.com/sacloud/libsacloud/sacloud" "time" + + "github.com/sacloud/libsacloud/sacloud" ) //HACK: さくらのAPI側仕様: Applianceの内容によってJSONフォーマットが異なるため @@ -38,6 +39,12 @@ type loadBalancerResponse struct { Success interface{} `json:",omitempty"` //HACK: さくらのAPI側仕様: 戻り値:Successがbool値へ変換できないためinterface{} } +type loadBalancerStatusResponse struct { + *sacloud.ResultFlagValue + Success interface{} `json:",omitempty"` //HACK: さくらのAPI側仕様: 戻り値:Successがbool値へ変換できないためinterface{} + LoadBalancer *sacloud.LoadBalancerStatusResult `json:",omitempty"` +} + // LoadBalancerAPI ロードバランサーAPI type LoadBalancerAPI struct { *baseAPI @@ -230,3 +237,20 @@ func (api *LoadBalancerAPI) AsyncSleepWhileCopying(id int64, timeout time.Durati func (api *LoadBalancerAPI) Monitor(id int64, body *sacloud.ResourceMonitorRequest) (*sacloud.MonitorValues, error) { return api.baseAPI.applianceMonitorBy(id, "interface", 0, body) } + +// Status ステータス取得 +func (api *LoadBalancerAPI) Status(id int64) (*sacloud.LoadBalancerStatusResult, error) { + var ( + method = "GET" + uri = fmt.Sprintf("%s/%d/status", api.getResourceURL(), id) + res = &loadBalancerStatusResponse{} + ) + err := api.baseAPI.request(method, uri, nil, res) + if err != nil { + return nil, err + } + if res.LoadBalancer == nil { + return nil, nil + } + return res.LoadBalancer, nil +} diff --git 
a/vendor/github.com/sacloud/libsacloud/api/mobile_gateway.go b/vendor/github.com/sacloud/libsacloud/api/mobile_gateway.go index 3ead7f735..1edc0a24f 100644 --- a/vendor/github.com/sacloud/libsacloud/api/mobile_gateway.go +++ b/vendor/github.com/sacloud/libsacloud/api/mobile_gateway.go @@ -3,8 +3,9 @@ package api import ( "encoding/json" "fmt" - "github.com/sacloud/libsacloud/sacloud" "time" + + "github.com/sacloud/libsacloud/sacloud" ) // SearchMobileGatewayResponse モバイルゲートウェイ検索レスポンス @@ -41,6 +42,14 @@ type mobileGatewaySIMResponse struct { Success interface{} `json:",omitempty"` //HACK: さくらのAPI側仕様: 戻り値:Successがbool値へ変換できないためinterface{} } +type trafficMonitoringBody struct { + TrafficMonitoring *sacloud.TrafficMonitoringConfig `json:"traffic_monitoring_config"` +} + +type trafficStatusBody struct { + TrafficStatus *sacloud.TrafficStatus `json:"traffic_status"` +} + // MobileGatewayAPI モバイルゲートウェイAPI type MobileGatewayAPI struct { *baseAPI @@ -322,8 +331,8 @@ func (api *MobileGatewayAPI) AddSIMRoute(id int64, simID int64, prefix string) ( param := &sacloud.MobileGatewaySIMRoutes{ SIMRoutes: routes, } - added := param.AddSIMRoute(simID, prefix) - if !added { + index, added := param.AddSIMRoute(simID, prefix) + if index < 0 || added == nil { return false, nil } @@ -412,3 +421,60 @@ func (api *MobileGatewayAPI) Logs(id int64, body interface{}) ([]sacloud.SIMLog, } return res.Logs, nil } + +// GetTrafficMonitoringConfig トラフィックコントロール 取得 +func (api *MobileGatewayAPI) GetTrafficMonitoringConfig(id int64) (*sacloud.TrafficMonitoringConfig, error) { + var ( + method = "GET" + uri = fmt.Sprintf("%s/%d/mobilegateway/traffic_monitoring", api.getResourceURL(), id) + ) + + res := &trafficMonitoringBody{} + err := api.baseAPI.request(method, uri, nil, res) + if err != nil { + return nil, err + } + return res.TrafficMonitoring, nil +} + +// SetTrafficMonitoringConfig トラフィックコントロール 設定 +func (api *MobileGatewayAPI) SetTrafficMonitoringConfig(id int64, trafficMonConfig 
*sacloud.TrafficMonitoringConfig) (bool, error) { + var ( + method = "PUT" + uri = fmt.Sprintf("%s/%d/mobilegateway/traffic_monitoring", api.getResourceURL(), id) + ) + + req := &trafficMonitoringBody{ + TrafficMonitoring: trafficMonConfig, + } + return api.modify(method, uri, req) +} + +// DisableTrafficMonitoringConfig トラフィックコントロール 解除 +func (api *MobileGatewayAPI) DisableTrafficMonitoringConfig(id int64) (bool, error) { + var ( + method = "DELETE" + uri = fmt.Sprintf("%s/%d/mobilegateway/traffic_monitoring", api.getResourceURL(), id) + ) + return api.modify(method, uri, nil) +} + +// GetTrafficStatus 当月通信量 取得 +func (api *MobileGatewayAPI) GetTrafficStatus(id int64) (*sacloud.TrafficStatus, error) { + var ( + method = "GET" + uri = fmt.Sprintf("%s/%d/mobilegateway/traffic_status", api.getResourceURL(), id) + ) + + res := &trafficStatusBody{} + err := api.baseAPI.request(method, uri, nil, res) + if err != nil { + return nil, err + } + return res.TrafficStatus, nil +} + +// MonitorBy 指定位置のインターフェースのアクティビティーモニター取得 +func (api *MobileGatewayAPI) MonitorBy(id int64, nicIndex int, body *sacloud.ResourceMonitorRequest) (*sacloud.MonitorValues, error) { + return api.baseAPI.applianceMonitorBy(id, "interface", nicIndex, body) +} diff --git a/vendor/github.com/sacloud/libsacloud/api/newsfeed.go b/vendor/github.com/sacloud/libsacloud/api/newsfeed.go index 4adab1caf..f2dad265d 100644 --- a/vendor/github.com/sacloud/libsacloud/api/newsfeed.go +++ b/vendor/github.com/sacloud/libsacloud/api/newsfeed.go @@ -2,6 +2,7 @@ package api import ( "encoding/json" + "github.com/sacloud/libsacloud/sacloud" ) diff --git a/vendor/github.com/sacloud/libsacloud/api/nfs.go b/vendor/github.com/sacloud/libsacloud/api/nfs.go index f044fc86a..a90da2516 100644 --- a/vendor/github.com/sacloud/libsacloud/api/nfs.go +++ b/vendor/github.com/sacloud/libsacloud/api/nfs.go @@ -2,9 +2,11 @@ package api import ( "encoding/json" + "errors" "fmt" - "github.com/sacloud/libsacloud/sacloud" "time" + + 
"github.com/sacloud/libsacloud/sacloud" ) // SearchNFSResponse NFS検索レスポンス @@ -94,6 +96,58 @@ func (api *NFSAPI) Create(value *sacloud.NFS) (*sacloud.NFS, error) { }) } +// CreateWithPlan プラン/サイズを指定してNFSを作成 +func (api *NFSAPI) CreateWithPlan(value *sacloud.CreateNFSValue, plan sacloud.NFSPlan, size sacloud.NFSSize) (*sacloud.NFS, error) { + + nfs := sacloud.NewNFS(value) + // get plan + plans, err := api.GetNFSPlans() + if err != nil { + return nil, err + } + if plans == nil { + return nil, errors.New("NFS plans not found") + } + + planID := plans.FindPlanID(plan, size) + if planID < 0 { + return nil, errors.New("NFS plans not found") + } + + nfs.Plan = sacloud.NewResource(planID) + nfs.Remark.SetRemarkPlanID(planID) + + return api.request(func(res *nfsResponse) error { + return api.create(api.createRequest(nfs), res) + }) +} + +// GetNFSPlans プラン一覧取得 +func (api *NFSAPI) GetNFSPlans() (*sacloud.NFSPlans, error) { + notes, err := api.client.Note.Reset().Find() + if err != nil { + return nil, err + } + for _, note := range notes.Notes { + if note.Class == sacloud.ENoteClass("json") && note.Name == "sys-nfs" { + rawPlans := note.Content + + var plans struct { + Plans *sacloud.NFSPlans `json:"plans"` + } + + err := json.Unmarshal([]byte(rawPlans), &plans) + if err != nil { + return nil, err + } + + return plans.Plans, nil + } + } + + return nil, nil +} + // Read 読み取り func (api *NFSAPI) Read(id int64) (*sacloud.NFS, error) { return api.request(func(res *nfsResponse) error { @@ -223,9 +277,9 @@ func (api *NFSAPI) AsyncSleepWhileCopying(id int64, timeout time.Duration, maxRe return poll(handler, timeout) } -// MonitorNFS NFS固有項目アクティビティモニター取得 -func (api *NFSAPI) MonitorNFS(id int64, body *sacloud.ResourceMonitorRequest) (*sacloud.MonitorValues, error) { - return api.baseAPI.applianceMonitorBy(id, "nfs", 0, body) +// MonitorFreeDiskSize NFSディスク残量アクティビティモニター取得 +func (api *NFSAPI) MonitorFreeDiskSize(id int64, body *sacloud.ResourceMonitorRequest) (*sacloud.MonitorValues, 
error) { + return api.baseAPI.applianceMonitorBy(id, "database", 0, body) } // MonitorInterface NICアクティビティーモニター取得 diff --git a/vendor/github.com/sacloud/libsacloud/api/polling.go b/vendor/github.com/sacloud/libsacloud/api/polling.go index bdff6bd43..fbc774f5a 100644 --- a/vendor/github.com/sacloud/libsacloud/api/polling.go +++ b/vendor/github.com/sacloud/libsacloud/api/polling.go @@ -21,16 +21,16 @@ func poll(handler pollingHandler, timeout time.Duration) (chan (interface{}), ch select { case <-tick: exit, state, err := handler() + if state != nil { + progChan <- state + } if err != nil { errChan <- fmt.Errorf("Failed: poll: %s", err) return } - if state != nil { - progChan <- state - if exit { - compChan <- state - return - } + if exit { + compChan <- state + return } case <-bomb: errChan <- fmt.Errorf("Timeout") @@ -65,9 +65,9 @@ type hasFailed interface { func waitingForAvailableFunc(readFunc func() (hasAvailable, error), maxRetry int) func() (bool, interface{}, error) { counter := 0 return func() (bool, interface{}, error) { - counter++ v, err := readFunc() if err != nil { + counter++ if maxRetry > 0 && counter < maxRetry { return false, nil, nil } @@ -96,9 +96,9 @@ type hasUpDown interface { func waitingForUpFunc(readFunc func() (hasUpDown, error), maxRetry int) func() (bool, interface{}, error) { counter := 0 return func() (bool, interface{}, error) { - counter++ v, err := readFunc() if err != nil { + counter++ if maxRetry > 0 && counter < maxRetry { return false, nil, nil } @@ -118,9 +118,9 @@ func waitingForUpFunc(readFunc func() (hasUpDown, error), maxRetry int) func() ( func waitingForDownFunc(readFunc func() (hasUpDown, error), maxRetry int) func() (bool, interface{}, error) { counter := 0 return func() (bool, interface{}, error) { - counter++ v, err := readFunc() if err != nil { + counter++ if maxRetry > 0 && counter < maxRetry { return false, nil, nil } @@ -140,9 +140,9 @@ func waitingForDownFunc(readFunc func() (hasUpDown, error), maxRetry int) func() 
func waitingForReadFunc(readFunc func() (interface{}, error), maxRetry int) func() (bool, interface{}, error) { counter := 0 return func() (bool, interface{}, error) { - counter++ v, err := readFunc() if err != nil { + counter++ if maxRetry > 0 && counter < maxRetry { return false, nil, nil } diff --git a/vendor/github.com/sacloud/libsacloud/api/product_server.go b/vendor/github.com/sacloud/libsacloud/api/product_server.go index f5efff13d..63807eee8 100644 --- a/vendor/github.com/sacloud/libsacloud/api/product_server.go +++ b/vendor/github.com/sacloud/libsacloud/api/product_server.go @@ -2,8 +2,8 @@ package api import ( "fmt" + "github.com/sacloud/libsacloud/sacloud" - "strconv" ) // ProductServerAPI サーバープランAPI @@ -24,48 +24,50 @@ func NewProductServerAPI(client *Client) *ProductServerAPI { } } -func (api *ProductServerAPI) getPlanIDBySpec(core int, memGB int) (int64, error) { - //assert args - if core <= 0 { - return -1, fmt.Errorf("Invalid Parameter: CPU Core") - } - if memGB <= 0 { - return -1, fmt.Errorf("Invalid Parameter: Memory Size(GB)") - } - - return strconv.ParseInt(fmt.Sprintf("%d%03d", memGB, core), 10, 64) -} - -// IsValidPlan 指定のコア数/メモリサイズのプランが存在し、有効であるか判定 -func (api *ProductServerAPI) IsValidPlan(core int, memGB int) (bool, error) { - - planID, err := api.getPlanIDBySpec(core, memGB) - if err != nil { - return false, err - } - productServer, err := api.Read(planID) - - if err != nil { - return false, err - } - - if productServer != nil { - return true, nil - } - - return false, fmt.Errorf("Server Plan[%d] Not Found", planID) - -} - -// GetBySpec 指定のコア数/メモリサイズのサーバープランを取得 -func (api *ProductServerAPI) GetBySpec(core int, memGB int) (*sacloud.ProductServer, error) { - planID, err := api.getPlanIDBySpec(core, memGB) - - productServer, err := api.Read(planID) - +// GetBySpec 指定のコア数/メモリサイズ/世代のプランを取得 +func (api *ProductServerAPI) GetBySpec(core int, memGB int, gen sacloud.PlanGenerations) (*sacloud.ProductServer, error) { + plans, err := api.Reset().Find() 
if err != nil { return nil, err } + var res sacloud.ProductServer + var found bool + for _, plan := range plans.ServerPlans { + if plan.CPU == core && plan.GetMemoryGB() == memGB { + if gen == sacloud.PlanDefault || gen == plan.Generation { + // PlanDefaultの場合は複数ヒットしうる。 + // この場合より新しい世代を優先する。 + if found && plan.Generation <= res.Generation { + continue + } + res = plan + found = true + } + } + } - return productServer, nil + if !found { + return nil, fmt.Errorf("Server Plan[core:%d, memory:%d, gen:%d] is not found", core, memGB, gen) + } + return &res, nil +} + +// IsValidPlan 指定のコア数/メモリサイズ/世代のプランが存在し、有効であるか判定 +func (api *ProductServerAPI) IsValidPlan(core int, memGB int, gen sacloud.PlanGenerations) (bool, error) { + + productServer, err := api.GetBySpec(core, memGB, gen) + + if err != nil { + return false, err + } + + if productServer == nil { + return false, fmt.Errorf("Server Plan[core:%d, memory:%d, gen:%d] is not found", core, memGB, gen) + } + + if productServer.Availability != sacloud.EAAvailable { + return false, fmt.Errorf("Server Plan[core:%d, memory:%d, gen:%d] is not available", core, memGB, gen) + } + + return true, nil } diff --git a/vendor/github.com/sacloud/libsacloud/api/proxylb.go b/vendor/github.com/sacloud/libsacloud/api/proxylb.go new file mode 100644 index 000000000..9e45596f3 --- /dev/null +++ b/vendor/github.com/sacloud/libsacloud/api/proxylb.go @@ -0,0 +1,224 @@ +package api + +import ( + "encoding/json" // "strings" + "fmt" + + "github.com/sacloud/libsacloud/sacloud" +) + +//HACK: さくらのAPI側仕様: CommonServiceItemsの内容によってJSONフォーマットが異なるため +// DNS/ProxyLB/シンプル監視それぞれでリクエスト/レスポンスデータ型を定義する。 + +// SearchProxyLBResponse ProxyLB検索レスポンス +type SearchProxyLBResponse struct { + // Total 総件数 + Total int `json:",omitempty"` + // From ページング開始位置 + From int `json:",omitempty"` + // Count 件数 + Count int `json:",omitempty"` + // CommonServiceProxyLBItems ProxyLBリスト + CommonServiceProxyLBItems []sacloud.ProxyLB `json:"CommonServiceItems,omitempty"` +} + +type 
proxyLBRequest struct { + CommonServiceProxyLBItem *sacloud.ProxyLB `json:"CommonServiceItem,omitempty"` + From int `json:",omitempty"` + Count int `json:",omitempty"` + Sort []string `json:",omitempty"` + Filter map[string]interface{} `json:",omitempty"` + Exclude []string `json:",omitempty"` + Include []string `json:",omitempty"` +} + +type proxyLBResponse struct { + *sacloud.ResultFlagValue + *sacloud.ProxyLB `json:"CommonServiceItem,omitempty"` +} + +// ProxyLBAPI ProxyLB API +type ProxyLBAPI struct { + *baseAPI +} + +// NewProxyLBAPI ProxyLB API作成 +func NewProxyLBAPI(client *Client) *ProxyLBAPI { + return &ProxyLBAPI{ + &baseAPI{ + client: client, + FuncGetResourceURL: func() string { + return "commonserviceitem" + }, + FuncBaseSearchCondition: func() *sacloud.Request { + res := &sacloud.Request{} + res.AddFilter("Provider.Class", "proxylb") + return res + }, + }, + } +} + +// Find 検索 +func (api *ProxyLBAPI) Find() (*SearchProxyLBResponse, error) { + + data, err := api.client.newRequest("GET", api.getResourceURL(), api.getSearchState()) + if err != nil { + return nil, err + } + var res SearchProxyLBResponse + if err := json.Unmarshal(data, &res); err != nil { + return nil, err + } + return &res, nil +} + +func (api *ProxyLBAPI) request(f func(*proxyLBResponse) error) (*sacloud.ProxyLB, error) { + res := &proxyLBResponse{} + err := f(res) + if err != nil { + return nil, err + } + return res.ProxyLB, nil +} + +func (api *ProxyLBAPI) createRequest(value *sacloud.ProxyLB) *proxyLBResponse { + return &proxyLBResponse{ProxyLB: value} +} + +// New 新規作成用パラメーター作成 +func (api *ProxyLBAPI) New(name string) *sacloud.ProxyLB { + return sacloud.CreateNewProxyLB(name) +} + +// Create 新規作成 +func (api *ProxyLBAPI) Create(value *sacloud.ProxyLB) (*sacloud.ProxyLB, error) { + return api.request(func(res *proxyLBResponse) error { + return api.create(api.createRequest(value), res) + }) +} + +// Read 読み取り +func (api *ProxyLBAPI) Read(id int64) (*sacloud.ProxyLB, error) { + return 
api.request(func(res *proxyLBResponse) error { + return api.read(id, nil, res) + }) +} + +// Update 更新 +func (api *ProxyLBAPI) Update(id int64, value *sacloud.ProxyLB) (*sacloud.ProxyLB, error) { + return api.request(func(res *proxyLBResponse) error { + return api.update(id, api.createRequest(value), res) + }) +} + +// UpdateSetting 設定更新 +func (api *ProxyLBAPI) UpdateSetting(id int64, value *sacloud.ProxyLB) (*sacloud.ProxyLB, error) { + req := &sacloud.ProxyLB{ + // Settings + Settings: value.Settings, + } + return api.request(func(res *proxyLBResponse) error { + return api.update(id, api.createRequest(req), res) + }) +} + +// Delete 削除 +func (api *ProxyLBAPI) Delete(id int64) (*sacloud.ProxyLB, error) { + return api.request(func(res *proxyLBResponse) error { + return api.delete(id, nil, res) + }) +} + +// ChangePlan プラン変更 +func (api *ProxyLBAPI) ChangePlan(id int64, newPlan sacloud.ProxyLBPlan) (*sacloud.ProxyLB, error) { + var ( + method = "PUT" + uri = fmt.Sprintf("%s/%d/plan", api.getResourceURL(), id) + ) + body := &sacloud.ProxyLB{} + body.SetPlan(newPlan) + realBody := map[string]interface{}{ + "CommonServiceItem": map[string]interface{}{ + "ServiceClass": body.ServiceClass, + }, + } + + return api.request(func(res *proxyLBResponse) error { + return api.baseAPI.request(method, uri, realBody, res) + }) +} + +type proxyLBCertificateResponse struct { + *sacloud.ResultFlagValue + ProxyLB *sacloud.ProxyLBCertificates `json:",omitempty"` +} + +// GetCertificates 証明書取得 +func (api *ProxyLBAPI) GetCertificates(id int64) (*sacloud.ProxyLBCertificates, error) { + var ( + method = "GET" + uri = fmt.Sprintf("%s/%d/proxylb/sslcertificate", api.getResourceURL(), id) + res = &proxyLBCertificateResponse{} + ) + err := api.baseAPI.request(method, uri, nil, res) + if err != nil { + return nil, err + } + if res.ProxyLB == nil { + return nil, nil + } + return res.ProxyLB, nil +} + +// SetCertificates 証明書設定 +func (api *ProxyLBAPI) SetCertificates(id int64, certs 
*sacloud.ProxyLBCertificates) (bool, error) { + var ( + method = "PUT" + uri = fmt.Sprintf("%s/%d/proxylb/sslcertificate", api.getResourceURL(), id) + res = &proxyLBCertificateResponse{} + ) + err := api.baseAPI.request(method, uri, map[string]interface{}{ + "ProxyLB": certs, + }, res) + if err != nil { + return false, err + } + return true, nil +} + +// DeleteCertificates 証明書削除 +func (api *ProxyLBAPI) DeleteCertificates(id int64) (bool, error) { + var ( + method = "DELETE" + uri = fmt.Sprintf("%s/%d/proxylb/sslcertificate", api.getResourceURL(), id) + ) + return api.baseAPI.modify(method, uri, nil) +} + +type proxyLBHealthResponse struct { + *sacloud.ResultFlagValue + ProxyLB *sacloud.ProxyLBStatus `json:",omitempty"` +} + +// Health ヘルスチェックステータス取得 +func (api *ProxyLBAPI) Health(id int64) (*sacloud.ProxyLBStatus, error) { + var ( + method = "GET" + uri = fmt.Sprintf("%s/%d/health", api.getResourceURL(), id) + res = &proxyLBHealthResponse{} + ) + err := api.baseAPI.request(method, uri, nil, res) + if err != nil { + return nil, err + } + if res.ProxyLB == nil { + return nil, nil + } + return res.ProxyLB, nil +} + +// Monitor アクティビティーモニター取得 +func (api *ProxyLBAPI) Monitor(id int64, body *sacloud.ResourceMonitorRequest) (*sacloud.MonitorValues, error) { + return api.baseAPI.applianceMonitorBy(id, "activity/proxylb", 0, body) +} diff --git a/vendor/github.com/sacloud/libsacloud/api/proxylb_gen.go b/vendor/github.com/sacloud/libsacloud/api/proxylb_gen.go new file mode 100644 index 000000000..daa413d82 --- /dev/null +++ b/vendor/github.com/sacloud/libsacloud/api/proxylb_gen.go @@ -0,0 +1,238 @@ +package api + +/************************************************ + generated by IDE. 
for [ProxyLBAPI] +************************************************/ + +import ( + "github.com/sacloud/libsacloud/sacloud" +) + +/************************************************ + To support fluent interface for Find() +************************************************/ + +// Reset 検索条件のリセット +func (api *ProxyLBAPI) Reset() *ProxyLBAPI { + api.reset() + return api +} + +// Offset オフセット +func (api *ProxyLBAPI) Offset(offset int) *ProxyLBAPI { + api.offset(offset) + return api +} + +// Limit リミット +func (api *ProxyLBAPI) Limit(limit int) *ProxyLBAPI { + api.limit(limit) + return api +} + +// Include 取得する項目 +func (api *ProxyLBAPI) Include(key string) *ProxyLBAPI { + api.include(key) + return api +} + +// Exclude 除外する項目 +func (api *ProxyLBAPI) Exclude(key string) *ProxyLBAPI { + api.exclude(key) + return api +} + +// FilterBy 指定キーでのフィルター +func (api *ProxyLBAPI) FilterBy(key string, value interface{}) *ProxyLBAPI { + api.filterBy(key, value, false) + return api +} + +// FilterMultiBy 任意項目でのフィルタ(完全一致 OR条件) +func (api *ProxyLBAPI) FilterMultiBy(key string, value interface{}) *ProxyLBAPI { + api.filterBy(key, value, true) + return api +} + +// WithNameLike 名称条件 +func (api *ProxyLBAPI) WithNameLike(name string) *ProxyLBAPI { + return api.FilterBy("Name", name) +} + +// WithTag タグ条件 +func (api *ProxyLBAPI) WithTag(tag string) *ProxyLBAPI { + return api.FilterBy("Tags.Name", tag) +} + +// WithTags タグ(複数)条件 +func (api *ProxyLBAPI) WithTags(tags []string) *ProxyLBAPI { + return api.FilterBy("Tags.Name", []interface{}{tags}) +} + +// func (api *ProxyLBAPI) WithSizeGib(size int) *ProxyLBAPI { +// api.FilterBy("SizeMB", size*1024) +// return api +// } + +// func (api *ProxyLBAPI) WithSharedScope() *ProxyLBAPI { +// api.FilterBy("Scope", "shared") +// return api +// } + +// func (api *ProxyLBAPI) WithUserScope() *ProxyLBAPI { +// api.FilterBy("Scope", "user") +// return api +// } + +// SortBy 指定キーでのソート +func (api *ProxyLBAPI) SortBy(key string, reverse bool) *ProxyLBAPI { + 
api.sortBy(key, reverse) + return api +} + +// SortByName 名称でのソート +func (api *ProxyLBAPI) SortByName(reverse bool) *ProxyLBAPI { + api.sortByName(reverse) + return api +} + +// func (api *ProxyLBAPI) SortBySize(reverse bool) *ProxyLBAPI { +// api.sortBy("SizeMB", reverse) +// return api +// } + +/************************************************ + To support Setxxx interface for Find() +************************************************/ + +// SetEmpty 検索条件のリセット +func (api *ProxyLBAPI) SetEmpty() { + api.reset() +} + +// SetOffset オフセット +func (api *ProxyLBAPI) SetOffset(offset int) { + api.offset(offset) +} + +// SetLimit リミット +func (api *ProxyLBAPI) SetLimit(limit int) { + api.limit(limit) +} + +// SetInclude 取得する項目 +func (api *ProxyLBAPI) SetInclude(key string) { + api.include(key) +} + +// SetExclude 除外する項目 +func (api *ProxyLBAPI) SetExclude(key string) { + api.exclude(key) +} + +// SetFilterBy 指定キーでのフィルター +func (api *ProxyLBAPI) SetFilterBy(key string, value interface{}) { + api.filterBy(key, value, false) +} + +// SetFilterMultiBy 任意項目でのフィルタ(完全一致 OR条件) +func (api *ProxyLBAPI) SetFilterMultiBy(key string, value interface{}) { + api.filterBy(key, value, true) +} + +// SetNameLike 名称条件 +func (api *ProxyLBAPI) SetNameLike(name string) { + api.FilterBy("Name", name) +} + +// SetTag タグ条件 +func (api *ProxyLBAPI) SetTag(tag string) { + api.FilterBy("Tags.Name", tag) +} + +// SetTags タグ(複数)条件 +func (api *ProxyLBAPI) SetTags(tags []string) { + api.FilterBy("Tags.Name", []interface{}{tags}) +} + +// func (api *ProxyLBAPI) SetSizeGib(size int) { +// api.FilterBy("SizeMB", size*1024) +// } + +// func (api *ProxyLBAPI) SetSharedScope() { +// api.FilterBy("Scope", "shared") +// } + +// func (api *ProxyLBAPI) SetUserScope() { +// api.FilterBy("Scope", "user") +// } + +// SetSortBy 指定キーでのソート +func (api *ProxyLBAPI) SetSortBy(key string, reverse bool) { + api.sortBy(key, reverse) +} + +// SetSortByName 名称でのソート +func (api *ProxyLBAPI) SetSortByName(reverse bool) { + 
api.sortByName(reverse) +} + +// func (api *ProxyLBAPI) SetSortBySize(reverse bool) { +// api.sortBy("SizeMB", reverse) +// } + +/************************************************ + To support CRUD(Create/Read/Update/Delete) +************************************************/ + +// func (api *ProxyLBAPI) New() *sacloud.ProxyLB { +// return &sacloud.ProxyLB{} +// } + +// func (api *ProxyLBAPI) Create(value *sacloud.ProxyLB) (*sacloud.ProxyLB, error) { +// return api.request(func(res *sacloud.Response) error { +// return api.create(api.createRequest(value), res) +// }) +// } + +// func (api *ProxyLBAPI) Read(id string) (*sacloud.ProxyLB, error) { +// return api.request(func(res *sacloud.Response) error { +// return api.read(id, nil, res) +// }) +// } + +// func (api *ProxyLBAPI) Update(id string, value *sacloud.ProxyLB) (*sacloud.ProxyLB, error) { +// return api.request(func(res *sacloud.Response) error { +// return api.update(id, api.createRequest(value), res) +// }) +// } + +// func (api *ProxyLBAPI) Delete(id string) (*sacloud.ProxyLB, error) { +// return api.request(func(res *sacloud.Response) error { +// return api.delete(id, nil, res) +// }) +// } + +/************************************************ + Inner functions +************************************************/ + +func (api *ProxyLBAPI) setStateValue(setFunc func(*sacloud.Request)) *ProxyLBAPI { + api.baseAPI.setStateValue(setFunc) + return api +} + +//func (api *ProxyLBAPI) request(f func(*sacloud.Response) error) (*sacloud.ProxyLB, error) { +// res := &sacloud.Response{} +// err := f(res) +// if err != nil { +// return nil, err +// } +// return res.ProxyLB, nil +//} +// +//func (api *ProxyLBAPI) createRequest(value *sacloud.ProxyLB) *sacloud.Request { +// req := &sacloud.Request{} +// req.ProxyLB = value +// return req +//} diff --git a/vendor/github.com/sacloud/libsacloud/api/rate_limit_transport.go b/vendor/github.com/sacloud/libsacloud/api/rate_limit_transport.go new file mode 100644 index 
000000000..7ea6c4a3b --- /dev/null +++ b/vendor/github.com/sacloud/libsacloud/api/rate_limit_transport.go @@ -0,0 +1,32 @@ +package api + +import ( + "go.uber.org/ratelimit" + + "net/http" + "sync" +) + +// RateLimitRoundTripper 秒間アクセス数を制限するためのhttp.RoundTripper実装 +type RateLimitRoundTripper struct { + // Transport 親となるhttp.RoundTripper、nilの場合http.DefaultTransportが利用される + Transport http.RoundTripper + // RateLimitPerSec 秒あたりのリクエスト数 + RateLimitPerSec int + + once sync.Once + rateLimit ratelimit.Limiter +} + +// RoundTrip http.RoundTripperの実装 +func (r *RateLimitRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + r.once.Do(func() { + r.rateLimit = ratelimit.New(r.RateLimitPerSec) + }) + if r.Transport == nil { + r.Transport = http.DefaultTransport + } + + r.rateLimit.Take() + return r.Transport.RoundTrip(req) +} diff --git a/vendor/github.com/sacloud/libsacloud/api/server.go b/vendor/github.com/sacloud/libsacloud/api/server.go index 16d81fe05..4572c6bd9 100644 --- a/vendor/github.com/sacloud/libsacloud/api/server.go +++ b/vendor/github.com/sacloud/libsacloud/api/server.go @@ -2,8 +2,9 @@ package api import ( "fmt" - "github.com/sacloud/libsacloud/sacloud" "time" + + "github.com/sacloud/libsacloud/sacloud" ) // ServerAPI サーバーAPI @@ -150,14 +151,18 @@ func (api *ServerAPI) SleepUntilDown(id int64, timeout time.Duration) error { } // ChangePlan サーバープラン変更(サーバーIDが変更となるため注意) -func (api *ServerAPI) ChangePlan(serverID int64, planID string) (*sacloud.Server, error) { +func (api *ServerAPI) ChangePlan(serverID int64, plan *sacloud.ProductServer) (*sacloud.Server, error) { var ( method = "PUT" - uri = fmt.Sprintf("%s/%d/to/plan/%s", api.getResourceURL(), serverID, planID) + uri = fmt.Sprintf("%s/%d/plan", api.getResourceURL(), serverID) + body = &sacloud.ProductServer{} ) + body.CPU = plan.CPU + body.MemoryMB = plan.MemoryMB + body.Generation = plan.Generation return api.request(func(res *sacloud.Response) error { - return api.baseAPI.request(method, uri, nil, 
res) + return api.baseAPI.request(method, uri, body, res) }) } diff --git a/vendor/github.com/sacloud/libsacloud/api/sim.go b/vendor/github.com/sacloud/libsacloud/api/sim.go index e7be3d718..ec2896915 100644 --- a/vendor/github.com/sacloud/libsacloud/api/sim.go +++ b/vendor/github.com/sacloud/libsacloud/api/sim.go @@ -204,6 +204,37 @@ func (api *SIMAPI) Logs(id int64, body interface{}) ([]sacloud.SIMLog, error) { return res.Logs, nil } +// GetNetworkOperator 通信キャリア 取得 +func (api *SIMAPI) GetNetworkOperator(id int64) (*sacloud.SIMNetworkOperatorConfigs, error) { + + var ( + method = "GET" + uri = fmt.Sprintf("%s/%d/sim/network_operator_config", api.getResourceURL(), id) + ) + + res := &sacloud.SIMNetworkOperatorConfigs{} + err := api.baseAPI.request(method, uri, nil, res) + if err != nil { + return nil, err + } + return res, nil +} + +// SetNetworkOperator 通信キャリア 設定 +func (api *SIMAPI) SetNetworkOperator(id int64, opConfig ...*sacloud.SIMNetworkOperatorConfig) (bool, error) { + + var ( + method = "PUT" + uri = fmt.Sprintf("%s/%d/sim/network_operator_config", api.getResourceURL(), id) + ) + + err := api.baseAPI.request(method, uri, &sacloud.SIMNetworkOperatorConfigs{NetworkOperatorConfigs: opConfig}, nil) + if err != nil { + return false, err + } + return true, nil +} + // Monitor アクティビティーモニター(Up/Down link BPS)取得 func (api *SIMAPI) Monitor(id int64, body *sacloud.ResourceMonitorRequest) (*sacloud.MonitorValues, error) { var ( diff --git a/vendor/github.com/sacloud/libsacloud/api/simple_monitor.go b/vendor/github.com/sacloud/libsacloud/api/simple_monitor.go index 8e1a45b2a..bd72feb32 100644 --- a/vendor/github.com/sacloud/libsacloud/api/simple_monitor.go +++ b/vendor/github.com/sacloud/libsacloud/api/simple_monitor.go @@ -1,9 +1,9 @@ package api import ( - "encoding/json" - // "strings" + "encoding/json" // "strings" "fmt" + "github.com/sacloud/libsacloud/sacloud" ) @@ -118,6 +118,25 @@ func (api *SimpleMonitorAPI) Delete(id int64) (*sacloud.SimpleMonitor, error) { }) 
} +// Health ヘルスチェック +// +// まだチェックが行われていない場合nilを返す +func (api *SimpleMonitorAPI) Health(id int64) (*sacloud.SimpleMonitorHealthCheckStatus, error) { + var ( + method = "GET" + uri = fmt.Sprintf("%s/%d/health", api.getResourceURL(), id) + ) + res := struct { + SimpleMonitor *sacloud.SimpleMonitorHealthCheckStatus `json:",omitempty"` + }{} + + err := api.baseAPI.request(method, uri, nil, &res) + if err != nil { + return nil, err + } + return res.SimpleMonitor, nil +} + // MonitorResponseTimeSec アクティビティーモニター(レスポンスタイム)取得 func (api *SimpleMonitorAPI) MonitorResponseTimeSec(id int64, body *sacloud.ResourceMonitorRequest) (*sacloud.MonitorValues, error) { var ( diff --git a/vendor/github.com/sacloud/libsacloud/api/ssh_key.go b/vendor/github.com/sacloud/libsacloud/api/ssh_key.go index 631482862..1ad482542 100644 --- a/vendor/github.com/sacloud/libsacloud/api/ssh_key.go +++ b/vendor/github.com/sacloud/libsacloud/api/ssh_key.go @@ -2,6 +2,7 @@ package api import ( "fmt" + "github.com/sacloud/libsacloud/sacloud" ) diff --git a/vendor/github.com/sacloud/libsacloud/api/switch.go b/vendor/github.com/sacloud/libsacloud/api/switch.go index b646c10d8..c98d9623f 100644 --- a/vendor/github.com/sacloud/libsacloud/api/switch.go +++ b/vendor/github.com/sacloud/libsacloud/api/switch.go @@ -2,6 +2,7 @@ package api import ( "fmt" + "github.com/sacloud/libsacloud/sacloud" ) diff --git a/vendor/github.com/sacloud/libsacloud/api/vpc_router.go b/vendor/github.com/sacloud/libsacloud/api/vpc_router.go index 43ad1513d..5a6df64e9 100644 --- a/vendor/github.com/sacloud/libsacloud/api/vpc_router.go +++ b/vendor/github.com/sacloud/libsacloud/api/vpc_router.go @@ -3,8 +3,9 @@ package api import ( "encoding/json" "fmt" - "github.com/sacloud/libsacloud/sacloud" "time" + + "github.com/sacloud/libsacloud/sacloud" ) //HACK: さくらのAPI側仕様: Applianceの内容によってJSONフォーマットが異なるため diff --git a/vendor/github.com/sacloud/libsacloud/api/webaccel.go b/vendor/github.com/sacloud/libsacloud/api/webaccel.go index 
e3b924b24..81708afd0 100644 --- a/vendor/github.com/sacloud/libsacloud/api/webaccel.go +++ b/vendor/github.com/sacloud/libsacloud/api/webaccel.go @@ -3,8 +3,9 @@ package api import ( "encoding/json" "fmt" - "github.com/sacloud/libsacloud/sacloud" "strings" + + "github.com/sacloud/libsacloud/sacloud" ) // WebAccelAPI ウェブアクセラレータAPI diff --git a/vendor/github.com/sacloud/libsacloud/api/webaccel_search.go b/vendor/github.com/sacloud/libsacloud/api/webaccel_search.go index fc8884b64..28e02163b 100644 --- a/vendor/github.com/sacloud/libsacloud/api/webaccel_search.go +++ b/vendor/github.com/sacloud/libsacloud/api/webaccel_search.go @@ -3,9 +3,10 @@ package api import ( "encoding/json" "fmt" - "github.com/sacloud/libsacloud/sacloud" "net/url" "strings" + + "github.com/sacloud/libsacloud/sacloud" ) // Reset 検索条件のリセット diff --git a/vendor/github.com/sacloud/libsacloud/libsacloud.go b/vendor/github.com/sacloud/libsacloud/libsacloud.go index 1981b5029..ff67288d8 100644 --- a/vendor/github.com/sacloud/libsacloud/libsacloud.go +++ b/vendor/github.com/sacloud/libsacloud/libsacloud.go @@ -2,4 +2,4 @@ package libsacloud // Version バージョン -const Version = "1.0.0-rc5" +const Version = "1.21.1" diff --git a/vendor/github.com/sacloud/libsacloud/sacloud/common_types.go b/vendor/github.com/sacloud/libsacloud/sacloud/common_types.go index 35e88afe9..042b861d7 100644 --- a/vendor/github.com/sacloud/libsacloud/sacloud/common_types.go +++ b/vendor/github.com/sacloud/libsacloud/sacloud/common_types.go @@ -138,6 +138,36 @@ var ( // EDiskConnection ディスク接続方法 type EDiskConnection string +// EUpstreamNetworkType 上流ネットワーク種別 +type EUpstreamNetworkType string + +// String EUpstreamNetworkTypeの文字列表現 +func (t EUpstreamNetworkType) String() string { + return string(t) +} + +var ( + // EUpstreamNetworkUnknown 不明 + EUpstreamNetworkUnknown = EUpstreamNetworkType("unknown") + // EUpstreamNetworkShared 共有セグメント + EUpstreamNetworkShared = EUpstreamNetworkType("shared") + // EUpstreamNetworkSwitch スイッチ(非スイッチ+ルータ) 
+ EUpstreamNetworkSwitch = EUpstreamNetworkType("switch") + // EUpstreamNetworkRouter ルータ(スイッチ+ルータのスイッチ) + EUpstreamNetworkRouter = EUpstreamNetworkType("router") + // EUpstreamNetworkNone 接続なし + EUpstreamNetworkNone = EUpstreamNetworkType("none") + + // UpstreamNetworks 文字列とEUpstreamNetworkTypeのマッピング + UpstreamNetworks = map[string]EUpstreamNetworkType{ + "unknown": EUpstreamNetworkUnknown, + "shared": EUpstreamNetworkShared, + "switch": EUpstreamNetworkSwitch, + "router": EUpstreamNetworkRouter, + "none": EUpstreamNetworkNone, + } +) + // SakuraCloudResources さくらのクラウド上のリソース種別一覧 type SakuraCloudResources struct { Server *Server `json:",omitempty"` // サーバー @@ -213,7 +243,7 @@ type Request struct { Filter map[string]interface{} `json:",omitempty"` // フィルタ Exclude []string `json:",omitempty"` // 除外する項目 Include []string `json:",omitempty"` // 取得する項目 - + DistantFrom []int64 `json:",omitempty"` // ストレージ隔離対象ディスク } // AddFilter フィルタの追加 @@ -324,3 +354,15 @@ var ( // DatetimeLayout さくらのクラウドAPIで利用される日付型のレイアウト(RFC3339) var DatetimeLayout = "2006-01-02T15:04:05-07:00" + +// PlanGenerations サーバプラン世代 +type PlanGenerations int + +var ( + // PlanDefault デフォルト + PlanDefault = PlanGenerations(0) + // PlanG1 第1世代(Generation:100) + PlanG1 = PlanGenerations(100) + // PlanG2 第2世代(Generation:200) + PlanG2 = PlanGenerations(200) +) diff --git a/vendor/github.com/sacloud/libsacloud/sacloud/coupon.go b/vendor/github.com/sacloud/libsacloud/sacloud/coupon.go new file mode 100644 index 000000000..90b6f738a --- /dev/null +++ b/vendor/github.com/sacloud/libsacloud/sacloud/coupon.go @@ -0,0 +1,14 @@ +package sacloud + +import "time" + +// Coupon クーポン情報 +type Coupon struct { + CouponID string `json:",omitempty"` // クーポンID + MemberID string `json:",omitempty"` // メンバーID + ContractID int64 `json:",omitempty"` // 契約ID + ServiceClassID int64 `json:",omitempty"` // サービスクラスID + Discount int64 `json:",omitempty"` // クーポン残高 + AppliedAt time.Time `json:",omitempty"` // 適用開始日 + UntilAt time.Time 
`json:",omitempty"` // 有効期限 +} diff --git a/vendor/github.com/sacloud/libsacloud/sacloud/database.go b/vendor/github.com/sacloud/libsacloud/sacloud/database.go index 447f9cd94..4d1b3ad60 100644 --- a/vendor/github.com/sacloud/libsacloud/sacloud/database.go +++ b/vendor/github.com/sacloud/libsacloud/sacloud/database.go @@ -2,9 +2,15 @@ package sacloud import ( "encoding/json" + "fmt" "strings" ) +// AllowDatabaseBackupWeekdays データベースバックアップ実行曜日リスト +func AllowDatabaseBackupWeekdays() []string { + return []string{"mon", "tue", "wed", "thu", "fri", "sat", "sun"} +} + // Database データベース(appliance) type Database struct { *Appliance // アプライアンス共通属性 @@ -64,8 +70,6 @@ type DatabaseCommonRemark struct { DatabaseRevision string `json:",omitempty"` // リビジョン DatabaseTitle string `json:",omitempty"` // タイトル DatabaseVersion string `json:",omitempty"` // バージョン - ReplicaPassword string `json:",omitempty"` // レプリケーションパスワード - ReplicaUser string `json:",omitempty"` // レプリケーションユーザー } // DatabaseSettings データベース設定リスト @@ -75,8 +79,9 @@ type DatabaseSettings struct { // DatabaseSetting データベース設定 type DatabaseSetting struct { - Backup *DatabaseBackupSetting `json:",omitempty"` // バックアップ設定 - Common *DatabaseCommonSetting `json:",oitempty"` // 共通設定 + Backup *DatabaseBackupSetting `json:",omitempty"` // バックアップ設定 + Common *DatabaseCommonSetting `json:",oitempty"` // 共通設定 + Replication *DatabaseReplicationSetting `json:",omitempty"` // レプリケーション設定 } // DatabaseServer データベースサーバー情報 @@ -122,17 +127,20 @@ func AllowDatabasePlans() []int { // DatabaseBackupSetting バックアップ設定 type DatabaseBackupSetting struct { - Rotate int `json:",omitempty"` // ローテーション世代数 - Time string `json:",omitempty"` // 開始時刻 + Rotate int `json:",omitempty"` // ローテーション世代数 + Time string `json:",omitempty"` // 開始時刻 + DayOfWeek []string `json:",omitempty"` // 取得曜日 } // DatabaseCommonSetting 共通設定 type DatabaseCommonSetting struct { - DefaultUser string `json:",omitempty"` // ユーザー名 - UserPassword string `json:",omitempty"` // ユーザーパスワード - 
WebUI interface{} `json:",omitempty"` // WebUIのIPアドレス or FQDN - ServicePort string // ポート番号 - SourceNetwork SourceNetwork // 接続許可ネットワーク + DefaultUser string `json:",omitempty"` // ユーザー名 + UserPassword string `json:",omitempty"` // ユーザーパスワード + WebUI interface{} `json:",omitempty"` // WebUIのIPアドレス or FQDN + ReplicaPassword string `json:",omitempty"` // レプリケーションパスワード + ReplicaUser string `json:",omitempty"` // レプリケーションユーザー + ServicePort json.Number `json:",omitempty"` // ポート番号 + SourceNetwork SourceNetwork // 接続許可ネットワーク } // SourceNetwork 接続許可ネットワーク @@ -168,32 +176,84 @@ func (s *SourceNetwork) MarshalJSON() ([]byte, error) { return json.Marshal(list) } +// DatabaseReplicationSetting レプリケーション設定 +type DatabaseReplicationSetting struct { + // Model レプリケーションモデル + Model DatabaseReplicationModels `json:",omitempty"` + // Appliance マスター側アプライアンス + Appliance *struct { + ID string + } `json:",omitempty"` + // IPAddress IPアドレス + IPAddress string `json:",omitempty"` + // Port ポート + Port int `json:",omitempty"` + // User ユーザー + User string `json:",omitempty"` + // Password パスワード + Password string `json:",omitempty"` +} + +// DatabaseReplicationModels データベースのレプリケーションモデル +type DatabaseReplicationModels string + +const ( + // DatabaseReplicationModelMasterSlave レプリケーションモデル: Master-Slave(マスター側) + DatabaseReplicationModelMasterSlave = "Master-Slave" + // DatabaseReplicationModelAsyncReplica レプリケーションモデル: Async-Replica(スレーブ側) + DatabaseReplicationModelAsyncReplica = "Async-Replica" +) + // CreateDatabaseValue データベース作成用パラメータ type CreateDatabaseValue struct { - Plan DatabasePlan // プラン - AdminPassword string // 管理者パスワード - DefaultUser string // ユーザー名 - UserPassword string // パスワード - SourceNetwork []string // 接続許可ネットワーク - ServicePort string // ポート - // BackupRotate int // バックアップ世代数 - BackupTime string // バックアップ開始時間 - SwitchID string // 接続先スイッチ - IPAddress1 string // IPアドレス1 - MaskLen int // ネットワークマスク長 - DefaultRoute string // デフォルトルート - Name string // 名称 - Description string // 説明 - Tags 
[]string // タグ - Icon *Resource // アイコン - WebUI bool // WebUI有効 - DatabaseName string // データベース名 - DatabaseRevision string // リビジョン - DatabaseTitle string // データベースタイトル - DatabaseVersion string // データベースバージョン - ReplicaUser string // ReplicaUser レプリケーションユーザー - SourceAppliance *Resource // クローン元DB - //ReplicaPassword string // in current API version , setted admin password + Plan DatabasePlan // プラン + AdminPassword string // 管理者パスワード + DefaultUser string // ユーザー名 + UserPassword string // パスワード + SourceNetwork []string // 接続許可ネットワーク + ServicePort int // ポート + EnableBackup bool // バックアップ有効化 + BackupRotate int // バックアップ世代数 + BackupTime string // バックアップ開始時間 + BackupDayOfWeek []string // バックアップ取得曜日 + SwitchID string // 接続先スイッチ + IPAddress1 string // IPアドレス1 + MaskLen int // ネットワークマスク長 + DefaultRoute string // デフォルトルート + Name string // 名称 + Description string // 説明 + Tags []string // タグ + Icon *Resource // アイコン + WebUI bool // WebUI有効 + DatabaseName string // データベース名 + DatabaseRevision string // リビジョン + DatabaseTitle string // データベースタイトル + DatabaseVersion string // データベースバージョン + // ReplicaUser string // レプリケーションユーザー 現在はreplica固定 + ReplicaPassword string // レプリケーションパスワード + SourceAppliance *Resource // クローン元DB +} + +// SlaveDatabaseValue スレーブデータベース作成用パラメータ +type SlaveDatabaseValue struct { + Plan DatabasePlan // プラン + DefaultUser string // ユーザー名 + UserPassword string // パスワード + SwitchID string // 接続先スイッチ + IPAddress1 string // IPアドレス1 + MaskLen int // ネットワークマスク長 + DefaultRoute string // デフォルトルート + Name string // 名称 + Description string // 説明 + Tags []string // タグ + Icon *Resource // アイコン + DatabaseName string // データベース名 + DatabaseVersion string // データベースバージョン + // ReplicaUser string // レプリケーションユーザー 現在はreplica固定 + ReplicaPassword string // レプリケーションパスワード + MasterApplianceID int64 // クローン元DB + MasterIPAddress string // マスターIPアドレス + MasterPort int // マスターポート } // NewCreatePostgreSQLDatabaseValue PostgreSQL作成用パラメーター @@ -267,10 +327,6 @@ func CreateNewDatabase(values 
*CreateDatabaseValue) *Database { DatabaseTitle: values.DatabaseTitle, // DatabaseVersion DatabaseVersion: values.DatabaseVersion, - // ReplicaUser - // ReplicaUser: values.ReplicaUser, - // ReplicaPassword - // ReplicaPassword: values.AdminPassword, }, }, // Plan @@ -288,6 +344,8 @@ func CreateNewDatabase(values *CreateDatabaseValue) *Database { Rotate: 8, // Time Time: values.BackupTime, + // DayOfWeek + DayOfWeek: values.BackupDayOfWeek, }, // Common Common: &DatabaseCommonSetting{ @@ -297,13 +355,19 @@ func CreateNewDatabase(values *CreateDatabaseValue) *Database { UserPassword: values.UserPassword, // SourceNetwork SourceNetwork: SourceNetwork(values.SourceNetwork), - // ServicePort - ServicePort: values.ServicePort, }, }, }, } + if values.ServicePort > 0 { + db.Settings.DBConf.Common.ServicePort = json.Number(fmt.Sprintf("%d", values.ServicePort)) + } + + if !values.EnableBackup { + db.Settings.DBConf.Backup = nil + } + db.Remark.Switch = &ApplianceRemarkSwitch{ // ID ID: values.SwitchID, @@ -323,11 +387,19 @@ func CreateNewDatabase(values *CreateDatabaseValue) *Database { db.Settings.DBConf.Common.WebUI = values.WebUI } + if values.ReplicaPassword != "" { + db.Settings.DBConf.Common.ReplicaUser = "replica" + db.Settings.DBConf.Common.ReplicaPassword = values.ReplicaPassword + db.Settings.DBConf.Replication = &DatabaseReplicationSetting{ + Model: DatabaseReplicationModelMasterSlave, + } + } + return db } -// CloneNewDatabase データベース作成 -func CloneNewDatabase(values *CreateDatabaseValue) *Database { +// NewSlaveDatabaseValue スレーブ向けパラメータ作成 +func NewSlaveDatabaseValue(values *SlaveDatabaseValue) *Database { db := &Database{ // Appliance Appliance: &Appliance{ @@ -363,32 +435,34 @@ func CloneNewDatabase(values *CreateDatabaseValue) *Database { DBConf: &DatabaseCommonRemarks{ // Common Common: &DatabaseCommonRemark{ - DatabaseName: values.DatabaseName, + // DatabaseName + DatabaseName: values.DatabaseName, + // DatabaseVersion DatabaseVersion: 
values.DatabaseVersion, }, }, // Plan - propPlanID: propPlanID{Plan: &Resource{ID: int64(values.Plan)}}, - SourceAppliance: values.SourceAppliance, + propPlanID: propPlanID{Plan: &Resource{ID: int64(values.Plan)}}, }, // Settings Settings: &DatabaseSettings{ // DBConf DBConf: &DatabaseSetting{ - // Backup - Backup: &DatabaseBackupSetting{ - // Rotate - // Rotate: values.BackupRotate, - Rotate: 8, - // Time - Time: values.BackupTime, - }, // Common Common: &DatabaseCommonSetting{ - // SourceNetwork - SourceNetwork: SourceNetwork(values.SourceNetwork), - // ServicePort - ServicePort: values.ServicePort, + // DefaultUser + DefaultUser: values.DefaultUser, + // UserPassword + UserPassword: values.UserPassword, + }, + // Replication + Replication: &DatabaseReplicationSetting{ + Model: DatabaseReplicationModelAsyncReplica, + Appliance: &struct{ ID string }{ID: fmt.Sprintf("%d", values.MasterApplianceID)}, + IPAddress: values.MasterIPAddress, + Port: values.MasterPort, + User: "replica", + Password: values.ReplicaPassword, }, }, }, @@ -409,10 +483,6 @@ func CloneNewDatabase(values *CreateDatabaseValue) *Database { map[string]interface{}{"IPAddress": values.IPAddress1}, } - if values.WebUI { - db.Settings.DBConf.Common.WebUI = values.WebUI - } - return db } @@ -433,3 +503,71 @@ func (s *Database) DeleteSourceNetwork(nw string) { } s.Settings.DBConf.Common.SourceNetwork = SourceNetwork(res) } + +// IsReplicationMaster レプリケーションが有効かつマスターとして構成されているか +func (s *Database) IsReplicationMaster() bool { + return s.IsReplicationEnabled() && s.Settings.DBConf.Replication.Model == DatabaseReplicationModelMasterSlave +} + +// IsReplicationEnabled レプリケーションが有効な場合はTrueを返す +func (s *Database) IsReplicationEnabled() bool { + return s.Settings.DBConf.Replication != nil +} + +// DatabaseName MariaDB or PostgreSQLの何れかを返す +func (s *Database) DatabaseName() string { + return s.Remark.DBConf.Common.DatabaseName +} + +// DatabaseRevision データベースのリビジョンを返す +// +// 例: MariaDBの場合 => 10.2.15 / 
PostgreSQLの場合 => 10.3 +func (s *Database) DatabaseRevision() string { + return s.Remark.DBConf.Common.DatabaseRevision +} + +// DatabaseVersion データベースのバージョンを返す +// +// 例: MariaDBの場合 => 10.2 / PostgreSQLの場合 => 10 +func (s *Database) DatabaseVersion() string { + return s.Remark.DBConf.Common.DatabaseVersion +} + +// WebUIAddress WebUIが有効な場合、IPアドレス or FQDNを返す、無効な場合は空文字を返す +func (s *Database) WebUIAddress() string { + webUI := s.Settings.DBConf.Common.WebUI + if webUI != nil { + if v, ok := webUI.(string); ok { + return v + } + } + return "" +} + +// IPAddress IPアドレスを取得 +func (s *Database) IPAddress() string { + if len(s.Remark.Servers) < 1 { + return "" + } + v, ok := s.Remark.Servers[0].(map[string]string) + if !ok { + return "" + } + return v["IPAddress"] +} + +// NetworkMaskLen ネットワークマスク長を取得 +func (s *Database) NetworkMaskLen() int { + if s.Remark.Network == nil { + return -1 + } + return s.Remark.Network.NetworkMaskLen +} + +// DefaultRoute デフォルトゲートウェイアドレスを取得 +func (s *Database) DefaultRoute() string { + if s.Remark.Network == nil { + return "" + } + return s.Remark.Network.DefaultRoute +} diff --git a/vendor/github.com/sacloud/libsacloud/sacloud/disk.go b/vendor/github.com/sacloud/libsacloud/sacloud/disk.go index 9deb36ef4..4f42a9849 100644 --- a/vendor/github.com/sacloud/libsacloud/sacloud/disk.go +++ b/vendor/github.com/sacloud/libsacloud/sacloud/disk.go @@ -4,22 +4,23 @@ import "fmt" // Disk ディスク type Disk struct { - *Resource // ID - propAvailability // 有功状態 - propName // 名称 - propDescription // 説明 - propSizeMB // サイズ(MB単位) - propMigratedMB // コピー済みデータサイズ(MB単位) - propCopySource // コピー元情報 - propJobStatus // マイグレーションジョブステータス - propBundleInfo // バンドル情報 - propServer // サーバー - propIcon // アイコン - propTags // タグ - propCreatedAt // 作成日時 - propPlanID // プランID - propDiskConnection // ディスク接続情報 - propDistantFrom // ストレージ隔離対象ディスク + *Resource // ID + propAvailability // 有功状態 + propName // 名称 + propDescription // 説明 + propSizeMB // サイズ(MB単位) + propMigratedMB // 
コピー済みデータサイズ(MB単位) + propCopySource // コピー元情報 + propJobStatus // マイグレーションジョブステータス + propBundleInfo // バンドル情報 + propServer // サーバー + propIcon // アイコン + propTags // タグ + propCreatedAt // 作成日時 + propPlanID // プランID + propDiskConnection // ディスク接続情報 + propDistantFrom // ストレージ隔離対象ディスク + Generation PlanGenerations `json:",omitempty"` // プラン世代 ReinstallCount int `json:",omitempty"` // 再インストール回数 diff --git a/vendor/github.com/sacloud/libsacloud/sacloud/dns.go b/vendor/github.com/sacloud/libsacloud/sacloud/dns.go index edfbbc63e..35182573c 100644 --- a/vendor/github.com/sacloud/libsacloud/sacloud/dns.go +++ b/vendor/github.com/sacloud/libsacloud/sacloud/dns.go @@ -50,7 +50,9 @@ func CreateNewDNS(zoneName string) *DNS { Class: "dns", }, Settings: DNSSettings{ - DNS: DNSRecordSets{}, + DNS: DNSRecordSets{ + ResourceRecordSets: []DNSRecordSet{}, + }, }, } } @@ -135,7 +137,9 @@ func (d *DNS) AddRecord(record *DNSRecordSet) { // ClearRecords レコード クリア func (d *DNS) ClearRecords() { - d.Settings.DNS = DNSRecordSets{} + d.Settings.DNS = DNSRecordSets{ + ResourceRecordSets: []DNSRecordSet{}, + } } // DNSRecordSets DNSレコード設定リスト diff --git a/vendor/github.com/sacloud/libsacloud/sacloud/interface.go b/vendor/github.com/sacloud/libsacloud/sacloud/interface.go index 557a3444e..8260686be 100644 --- a/vendor/github.com/sacloud/libsacloud/sacloud/interface.go +++ b/vendor/github.com/sacloud/libsacloud/sacloud/interface.go @@ -41,3 +41,21 @@ func (i *Interface) GetHostName() string { func (i *Interface) GetPacketFilter() *PacketFilter { return i.PacketFilter } + +// UpstreamType 上流ネットワーク種別 +func (i *Interface) UpstreamType() EUpstreamNetworkType { + sw := i.Switch + if sw == nil { + return EUpstreamNetworkNone + } + + if sw.Subnet == nil { + return EUpstreamNetworkSwitch + } + + if sw.Scope == ESCopeShared { + return EUpstreamNetworkShared + } + + return EUpstreamNetworkRouter +} diff --git a/vendor/github.com/sacloud/libsacloud/sacloud/internet.go 
b/vendor/github.com/sacloud/libsacloud/sacloud/internet.go index ef7758809..980d81021 100644 --- a/vendor/github.com/sacloud/libsacloud/sacloud/internet.go +++ b/vendor/github.com/sacloud/libsacloud/sacloud/internet.go @@ -41,7 +41,7 @@ func (i *Internet) SetNetworkMaskLen(v int) { // AllowInternetBandWidth 設定可能な帯域幅の値リスト func AllowInternetBandWidth() []int { - return []int{100, 250, 500, 1000, 1500, 2000, 2500, 3000} + return []int{100, 250, 500, 1000, 1500, 2000, 2500, 3000, 5000} } // AllowInternetNetworkMaskLen 設定可能なネットワークマスク長の値リスト diff --git a/vendor/github.com/sacloud/libsacloud/sacloud/loadbalancer.go b/vendor/github.com/sacloud/libsacloud/sacloud/loadbalancer.go index 5401fe85f..71cd1dcbd 100644 --- a/vendor/github.com/sacloud/libsacloud/sacloud/loadbalancer.go +++ b/vendor/github.com/sacloud/libsacloud/sacloud/loadbalancer.go @@ -1,5 +1,7 @@ package sacloud +import "strconv" + // LoadBalancer ロードバランサー type LoadBalancer struct { *Appliance // アプライアンス共通属性 @@ -8,6 +10,43 @@ type LoadBalancer struct { Settings *LoadBalancerSettings `json:",omitempty"` // ロードバランサー設定 } +// IsHA 冗長化されている場合にtrueを返す +func (l *LoadBalancer) IsHA() bool { + isHA := false + if len(l.Remark.Servers) > 1 { + if v, ok := l.Remark.Servers[1].(map[string]string); ok { + if _, ok := v["IPAddress"]; ok { + isHA = true + } + } + } + return isHA +} + +// IPAddress1 ロードバランサ本体のIPアドレス(1番目)を返す +func (l *LoadBalancer) IPAddress1() string { + if len(l.Remark.Servers) > 0 { + if v, ok := l.Remark.Servers[0].(map[string]string); ok { + if v, ok := v["IPAddress"]; ok { + return v + } + } + } + return "" +} + +// IPAddress2 ロードバランサ本体のIPアドレス(2番目)を返す +func (l *LoadBalancer) IPAddress2() string { + if len(l.Remark.Servers) > 1 { + if v, ok := l.Remark.Servers[1].(map[string]string); ok { + if v, ok := v["IPAddress"]; ok { + return v + } + } + } + return "" +} + // LoadBalancerRemark リマーク type LoadBalancerRemark struct { *ApplianceRemarkBase @@ -17,7 +56,7 @@ type LoadBalancerRemark struct { // 
LoadBalancerSettings ロードバランサー設定リスト type LoadBalancerSettings struct { - LoadBalancer []*LoadBalancerSetting `json:",omitempty"` // ロードバランサー設定リスト + LoadBalancer []*LoadBalancerSetting // ロードバランサー設定リスト } // LoadBalancerSetting ロードバランサー仮想IP設定 @@ -26,6 +65,7 @@ type LoadBalancerSetting struct { Port string `json:",omitempty"` // ポート番号 DelayLoop string `json:",omitempty"` // 監視間隔 SorryServer string `json:",omitempty"` // ソーリーサーバー + Description string `json:",omitempty"` // 説明 Servers []*LoadBalancerServer `json:",omitempty"` // 仮想IP配下の実サーバー } @@ -179,3 +219,73 @@ func (s *LoadBalancerSetting) DeleteServer(ip string, port string) { s.Servers = res } + +// LoadBalancerStatusResult ロードバランサーのステータスAPI戻り値 +type LoadBalancerStatusResult []*LoadBalancerStatus + +// Get VIPに対応するステータスを取得 +func (l *LoadBalancerStatusResult) Get(vip string) *LoadBalancerStatus { + for _, v := range *l { + if v.VirtualIPAddress == vip { + return v + } + } + return nil +} + +// LoadBalancerStatus ロードバランサーのステータス +type LoadBalancerStatus struct { + VirtualIPAddress string + Port string + Servers []*LoadBalancerServerStatus `json:",omitempty"` + CPS string +} + +// Get IPアドレスに対応する実サーバのステータスを取得 +func (l *LoadBalancerStatus) Get(ip string) *LoadBalancerServerStatus { + for _, v := range l.Servers { + if v.IPAddress == ip { + return v + } + } + return nil +} + +// NumCPS CPSを数値にして返す +func (l *LoadBalancerStatus) NumCPS() int { + v, _ := strconv.Atoi(l.CPS) // nolint - ignore error + return v +} + +// NumPort Portを数値にして返す +func (l *LoadBalancerStatus) NumPort() int { + v, _ := strconv.Atoi(l.Port) // nolint - ignore error + return v +} + +// LoadBalancerServerStatus ロードバランサーのVIP配下の実サーバのステータス +type LoadBalancerServerStatus struct { + ActiveConn string + IPAddress string + Status string + Port string + CPS string +} + +// NumActiveConn ActiveConnを数値にして返す +func (l *LoadBalancerServerStatus) NumActiveConn() int { + v, _ := strconv.Atoi(l.ActiveConn) // nolint - ignore error + return v +} + +// NumCPS 
CPSを数値にして返す +func (l *LoadBalancerServerStatus) NumCPS() int { + v, _ := strconv.Atoi(l.CPS) // nolint - ignore error + return v +} + +// NumPort Portを数値にして返す +func (l *LoadBalancerServerStatus) NumPort() int { + v, _ := strconv.Atoi(l.Port) // nolint - ignore error + return v +} diff --git a/vendor/github.com/sacloud/libsacloud/sacloud/lock.go b/vendor/github.com/sacloud/libsacloud/sacloud/lock.go new file mode 100644 index 000000000..f8e66c9ba --- /dev/null +++ b/vendor/github.com/sacloud/libsacloud/sacloud/lock.go @@ -0,0 +1,29 @@ +package sacloud + +import ( + "fmt" + + "github.com/sacloud/libsacloud/utils/mutexkv" +) + +var resourceMu = mutexkv.NewMutexKV() + +// LockByKey 任意のキーでのMutexロック +func LockByKey(key string) { + resourceMu.Lock(key) +} + +// UnlockByKey 任意のキーでのMutexアンロック +func UnlockByKey(key string) { + resourceMu.Unlock(key) +} + +// LockByResourceID リソース単位でのMutexロック +func LockByResourceID(resourceID int64) { + resourceMu.Lock(fmt.Sprintf("%d", resourceID)) +} + +// UnlockByResourceID リソース単位でのMutexアンロック +func UnlockByResourceID(resourceID int64) { + resourceMu.Unlock(fmt.Sprintf("%d", resourceID)) +} diff --git a/vendor/github.com/sacloud/libsacloud/sacloud/mobile_gateway.go b/vendor/github.com/sacloud/libsacloud/sacloud/mobile_gateway.go index 6e98d635e..a6e6dc4e0 100644 --- a/vendor/github.com/sacloud/libsacloud/sacloud/mobile_gateway.go +++ b/vendor/github.com/sacloud/libsacloud/sacloud/mobile_gateway.go @@ -3,6 +3,7 @@ package sacloud import ( "encoding/json" "fmt" + "strconv" "strings" ) @@ -28,9 +29,71 @@ type MobileGatewaySettings struct { // MobileGatewaySetting モバイルゲートウェイ設定 type MobileGatewaySetting struct { - InternetConnection *MGWInternetConnection `json:",omitempty"` // インターネット接続 - Interfaces []*MGWInterface `json:",omitempty"` // インターフェース - StaticRoutes []*MGWStaticRoute `json:",omitempty"` // スタティックルート + InternetConnection *MGWInternetConnection `json:",omitempty"` // インターネット接続 + InterDeviceCommunication *MGWInterDeviceCommunication 
`json:",omitempty"` // デバイス間通信 + Interfaces []*MGWInterface `json:",omitempty"` // インターフェース + StaticRoutes []*MGWStaticRoute `json:",omitempty"` // スタティックルート +} + +// HasStaticRoutes スタティックルートを保持しているか +func (m *MobileGatewaySetting) HasStaticRoutes() bool { + return m.StaticRoutes != nil && len(m.StaticRoutes) > 0 +} + +// AddStaticRoute スタティックルート設定 追加 +func (m *MobileGatewaySetting) AddStaticRoute(prefix string, nextHop string) (int, *MGWStaticRoute) { + if m.StaticRoutes == nil { + m.StaticRoutes = []*MGWStaticRoute{} + } + + s := &MGWStaticRoute{ + Prefix: prefix, + NextHop: nextHop, + } + m.StaticRoutes = append(m.StaticRoutes, s) + return len(m.StaticRoutes) - 1, s +} + +// RemoveStaticRoute スタティックルート設定 削除 +func (m *MobileGatewaySetting) RemoveStaticRoute(prefix string, nextHop string) { + if m.StaticRoutes == nil { + return + } + + dest := []*MGWStaticRoute{} + for _, s := range m.StaticRoutes { + if s.Prefix != prefix || s.NextHop != nextHop { + dest = append(dest, s) + } + } + m.StaticRoutes = dest +} + +// RemoveStaticRouteAt スタティックルート設定 削除 +func (m *MobileGatewaySetting) RemoveStaticRouteAt(index int) { + if m.StaticRoutes == nil { + return + } + + if index < len(m.StaticRoutes) { + s := m.StaticRoutes[index] + m.RemoveStaticRoute(s.Prefix, s.NextHop) + } +} + +// FindStaticRoute スタティックルート設定 検索 +func (m *MobileGatewaySetting) FindStaticRoute(prefix string, nextHop string) (int, *MGWStaticRoute) { + for i, s := range m.StaticRoutes { + if s.Prefix == prefix && s.NextHop == nextHop { + return i, s + } + } + return -1, nil +} + +// MGWInterDeviceCommunication デバイス間通信 +type MGWInterDeviceCommunication struct { + Enabled string `json:",omitempty"` } // MGWInternetConnection インターネット接続 @@ -121,6 +184,68 @@ func (m *MobileGateway) ClearPrivateInterface() { m.Settings.MobileGateway.Interfaces = []*MGWInterface{nil} } +// HasSetting モバイルゲートウェイ設定を保持しているか +func (m *MobileGateway) HasSetting() bool { + return m.Settings != nil && m.Settings.MobileGateway != nil +} + 
+// HasStaticRoutes スタティックルートを保持しているか +func (m *MobileGateway) HasStaticRoutes() bool { + return m.HasSetting() && m.Settings.MobileGateway.HasStaticRoutes() +} + +// InternetConnection インターネット接続が有効な場合にTrueを返す +func (m *MobileGateway) InternetConnection() bool { + return m.HasSetting() && + m.Settings.MobileGateway.InternetConnection != nil && + m.Settings.MobileGateway.InternetConnection.Enabled == "True" +} + +// InterDeviceCommunication デバイス間通信が有効な場合にTrueを返す +func (m *MobileGateway) InterDeviceCommunication() bool { + return m.HasSetting() && + m.Settings.MobileGateway.InterDeviceCommunication != nil && + m.Settings.MobileGateway.InterDeviceCommunication.Enabled == "True" +} + +// IPAddress 0番目のNICのIPアドレスを取得 +func (m *MobileGateway) IPAddress() string { + return m.IPAddressAt(0) +} + +// IPAddressAt IPアドレスを取得 +func (m *MobileGateway) IPAddressAt(index int) string { + if len(m.Interfaces) <= index { + return "" + } + if index == 0 { + return m.Interfaces[0].IPAddress + } + + ipaddresses := m.Settings.MobileGateway.Interfaces[index].IPAddress + if len(ipaddresses) < 1 { + return "" + } + return ipaddresses[0] +} + +// NetworkMaskLen 0番目のNICのネットワークマスク長を取得 +func (m *MobileGateway) NetworkMaskLen() int { + return m.NetworkMaskLenAt(0) +} + +// NetworkMaskLenAt ネットワークマスク長を取得 +func (m *MobileGateway) NetworkMaskLenAt(index int) int { + if len(m.Interfaces) <= index { + return -1 + } + if index == 0 { + return m.Interfaces[0].Switch.UserSubnet.NetworkMaskLen + } + + return m.Settings.MobileGateway.Interfaces[0].NetworkMaskLen +} + // NewMobileGatewayResolver DNS登録用パラメータ作成 func NewMobileGatewayResolver(dns1, dns2 string) *MobileGatewayResolver { return &MobileGatewayResolver{ @@ -175,7 +300,7 @@ type MobileGatewaySIMRoutes struct { } // AddSIMRoute SIMルート追加 -func (m *MobileGatewaySIMRoutes) AddSIMRoute(simID int64, prefix string) bool { +func (m *MobileGatewaySIMRoutes) AddSIMRoute(simID int64, prefix string) (int, *MobileGatewaySIMRoute) { var exists bool for _, route 
:= range m.SIMRoutes { if route.ResourceID == fmt.Sprintf("%d", simID) && route.Prefix == prefix { @@ -184,12 +309,14 @@ func (m *MobileGatewaySIMRoutes) AddSIMRoute(simID int64, prefix string) bool { } } if !exists { - m.SIMRoutes = append(m.SIMRoutes, &MobileGatewaySIMRoute{ + r := &MobileGatewaySIMRoute{ ResourceID: fmt.Sprintf("%d", simID), Prefix: prefix, - }) + } + m.SIMRoutes = append(m.SIMRoutes, r) + return len(m.SIMRoutes) - 1, r } - return !exists + return -1, nil } // DeleteSIMRoute SIMルート削除 @@ -207,3 +334,79 @@ func (m *MobileGatewaySIMRoutes) DeleteSIMRoute(simID int64, prefix string) bool m.SIMRoutes = routes return exists } + +// DeleteSIMRouteAt SIMルート削除 +func (m *MobileGatewaySIMRoutes) DeleteSIMRouteAt(index int) bool { + if m.SIMRoutes == nil { + return false + } + + if index < len(m.SIMRoutes) { + s := m.SIMRoutes[index] + if simID, err := strconv.ParseInt(s.ResourceID, 10, 64); err == nil { + return m.DeleteSIMRoute(simID, s.Prefix) + } + } + return false +} + +// FindSIMRoute SIMルート設定 検索 +func (m *MobileGatewaySIMRoutes) FindSIMRoute(simID int64, prefix string) (int, *MobileGatewaySIMRoute) { + for i, r := range m.SIMRoutes { + if r.Prefix == prefix && r.ResourceID == fmt.Sprintf("%d", simID) { + return i, r + } + } + return -1, nil +} + +// TrafficStatus トラフィックコントロール 当月通信量 +type TrafficStatus struct { + UplinkBytes uint64 `json:"uplink_bytes,omitempty"` + DownlinkBytes uint64 `json:"downlink_bytes,omitempty"` + TrafficShaping bool `json:"traffic_shaping"` // 帯域制限 +} + +// UnmarshalJSON JSONアンマーシャル(uint64文字列対応) +func (s *TrafficStatus) UnmarshalJSON(data []byte) error { + tmp := &struct { + UplinkBytes string `json:"uplink_bytes,omitempty"` + DownlinkBytes string `json:"downlink_bytes,omitempty"` + TrafficShaping bool `json:"traffic_shaping"` + }{} + if err := json.Unmarshal(data, &tmp); err != nil { + return err + } + + var err error + s.UplinkBytes, err = strconv.ParseUint(tmp.UplinkBytes, 10, 64) + if err != nil { + return err + } + 
s.DownlinkBytes, err = strconv.ParseUint(tmp.DownlinkBytes, 10, 64) + if err != nil { + return err + } + s.TrafficShaping = tmp.TrafficShaping + return nil +} + +// TrafficMonitoringConfig トラフィックコントロール 設定 +type TrafficMonitoringConfig struct { + TrafficQuotaInMB int `json:"traffic_quota_in_mb"` + BandWidthLimitInKbps int `json:"bandwidth_limit_in_kbps"` + EMailConfig *TrafficMonitoringNotifyEmail `json:"email_config"` + SlackConfig *TrafficMonitoringNotifySlack `json:"slack_config"` + AutoTrafficShaping bool `json:"auto_traffic_shaping"` +} + +// TrafficMonitoringNotifyEmail トラフィックコントロール通知設定 +type TrafficMonitoringNotifyEmail struct { + Enabled bool `json:"enabled"` // 有効/無効 +} + +// TrafficMonitoringNotifySlack トラフィックコントロール通知設定 +type TrafficMonitoringNotifySlack struct { + Enabled bool `json:"enabled"` // 有効/無効 + IncomingWebhooksURL string `json:"slack_url,omitempty"` // Slack通知の場合のWebhook URL +} diff --git a/vendor/github.com/sacloud/libsacloud/sacloud/monitor.go b/vendor/github.com/sacloud/libsacloud/sacloud/monitor.go index f4ecc061c..1cfa2534d 100644 --- a/vendor/github.com/sacloud/libsacloud/sacloud/monitor.go +++ b/vendor/github.com/sacloud/libsacloud/sacloud/monitor.go @@ -9,23 +9,27 @@ import ( // MonitorValue アクティビティモニター type MonitorValue struct { - CPUTime *float64 `json:"CPU-TIME,omitempty"` // CPU時間 - Write *float64 `json:",omitempty"` // ディスク書き込み - Read *float64 `json:",omitempty"` // ディスク読み取り - Receive *float64 `json:",omitempty"` // パケット受信 - Send *float64 `json:",omitempty"` // パケット送信 - In *float64 `json:",omitempty"` // パケット受信 - Out *float64 `json:",omitempty"` // パケット送信 - TotalMemorySize *float64 `json:"Total-Memory-Size,omitempty"` // 総メモリサイズ - UsedMemorySize *float64 `json:"Used-Memory-Size,omitempty"` // 使用済みメモリサイズ - TotalDisk1Size *float64 `json:"Total-Disk1-Size,omitempty"` // 総ディスクサイズ - UsedDisk1Size *float64 `json:"Used-Disk1-Size,omitempty"` // 使用済みディスクサイズ - TotalDisk2Size *float64 `json:"Total-Disk2-Size,omitempty"` // 総ディスクサイズ - 
UsedDisk2Size *float64 `json:"Used-Disk2-Size,omitempty"` // 使用済みディスクサイズ - FreeDiskSize *float64 `json:"Free-Disk-Size,omitempty"` // 空きディスクサイズ(NFS) - ResponseTimeSec *float64 `json:"responsetimesec,omitempty"` // レスポンスタイム(シンプル監視) - UplinkBPS *float64 `json:"UplinkBps,omitempty"` // 上り方向トラフィック - DownlinkBPS *float64 `json:"DownlinkBps"` // 下り方向トラフィック + CPUTime *float64 `json:"CPU-TIME,omitempty"` // CPU時間 + Write *float64 `json:",omitempty"` // ディスク書き込み + Read *float64 `json:",omitempty"` // ディスク読み取り + Receive *float64 `json:",omitempty"` // パケット受信 + Send *float64 `json:",omitempty"` // パケット送信 + In *float64 `json:",omitempty"` // パケット受信 + Out *float64 `json:",omitempty"` // パケット送信 + TotalMemorySize *float64 `json:"Total-Memory-Size,omitempty"` // 総メモリサイズ + UsedMemorySize *float64 `json:"Used-Memory-Size,omitempty"` // 使用済みメモリサイズ + TotalDisk1Size *float64 `json:"Total-Disk1-Size,omitempty"` // 総ディスクサイズ + UsedDisk1Size *float64 `json:"Used-Disk1-Size,omitempty"` // 使用済みディスクサイズ + TotalDisk2Size *float64 `json:"Total-Disk2-Size,omitempty"` // 総ディスクサイズ + UsedDisk2Size *float64 `json:"Used-Disk2-Size,omitempty"` // 使用済みディスクサイズ + BinlogUsedSizeKiB *float64 `json:"binlogUsedSizeKiB,omitempty"` // バイナリログのサイズ(レプリケーション有効時のみ、master/slave両方で利用可能) + DelayTimeSec *float64 `json:"delayTimeSec,omitempty"` // レプリケーション遅延時間(レプリケーション有効時のみ、slave側のみ) + FreeDiskSize *float64 `json:"Free-Disk-Size,omitempty"` // 空きディスクサイズ(NFS) + ResponseTimeSec *float64 `json:"responsetimesec,omitempty"` // レスポンスタイム(シンプル監視) + UplinkBPS *float64 `json:"UplinkBps,omitempty"` // 上り方向トラフィック + DownlinkBPS *float64 `json:"DownlinkBps,omitempty"` // 下り方向トラフィック + ActiveConnections *float64 `json:"activeConnections,omitempty"` // アクティブコネクション(プロキシLB) + ConnectionsPerSec *float64 `json:"connectionsPerSec,omitempty"` // 秒間コネクション数 } // UnmarshalJSON JSONアンマーシャル(配列、オブジェクトが混在するためここで対応) @@ -36,23 +40,27 @@ func (m *MonitorValue) UnmarshalJSON(data []byte) error { } tmp := &struct { - CPUTime *float64 
`json:"CPU-TIME,omitempty"` - Write *float64 `json:",omitempty"` - Read *float64 `json:",omitempty"` - Receive *float64 `json:",omitempty"` - Send *float64 `json:",omitempty"` - In *float64 `json:",omitempty"` - Out *float64 `json:",omitempty"` - TotalMemorySize *float64 `json:"Total-Memory-Size,omitempty"` - UsedMemorySize *float64 `json:"Used-Memory-Size,omitempty"` - TotalDisk1Size *float64 `json:"Total-Disk1-Size,omitempty"` - UsedDisk1Size *float64 `json:"Used-Disk1-Size,omitempty"` - TotalDisk2Size *float64 `json:"Total-Disk2-Size,omitempty"` - UsedDisk2Size *float64 `json:"Used-Disk2-Size,omitempty"` - FreeDiskSize *float64 `json:"Free-Disk-Size,omitempty"` - ResponseTimeSec *float64 `json:"responsetimesec,omitempty"` - UplinkBPS *float64 `json:"UplinkBps,omitempty"` - DownlinkBPS *float64 `json:"DownlinkBps"` + CPUTime *float64 `json:"CPU-TIME,omitempty"` + Write *float64 `json:",omitempty"` + Read *float64 `json:",omitempty"` + Receive *float64 `json:",omitempty"` + Send *float64 `json:",omitempty"` + In *float64 `json:",omitempty"` + Out *float64 `json:",omitempty"` + TotalMemorySize *float64 `json:"Total-Memory-Size,omitempty"` + UsedMemorySize *float64 `json:"Used-Memory-Size,omitempty"` + TotalDisk1Size *float64 `json:"Total-Disk1-Size,omitempty"` + UsedDisk1Size *float64 `json:"Used-Disk1-Size,omitempty"` + TotalDisk2Size *float64 `json:"Total-Disk2-Size,omitempty"` + UsedDisk2Size *float64 `json:"Used-Disk2-Size,omitempty"` + BinlogUsedSizeKiB *float64 `json:"binlogUsedSizeKiB,omitempty"` + DelayTimeSec *float64 `json:"delayTimeSec,omitempty"` + FreeDiskSize *float64 `json:"Free-Disk-Size,omitempty"` + ResponseTimeSec *float64 `json:"responsetimesec,omitempty"` + UplinkBPS *float64 `json:"UplinkBps,omitempty"` + DownlinkBPS *float64 `json:"DownlinkBps,omitempty"` + ActiveConnections *float64 `json:"activeConnections,omitempty"` + ConnectionsPerSec *float64 `json:"connectionsPerSec,omitempty"` }{} if err := json.Unmarshal(data, &tmp); err != nil { 
return err @@ -71,10 +79,14 @@ func (m *MonitorValue) UnmarshalJSON(data []byte) error { m.UsedDisk1Size = tmp.UsedDisk1Size m.TotalDisk2Size = tmp.TotalDisk2Size m.UsedDisk2Size = tmp.UsedDisk2Size + m.BinlogUsedSizeKiB = tmp.BinlogUsedSizeKiB + m.DelayTimeSec = tmp.DelayTimeSec m.FreeDiskSize = tmp.FreeDiskSize m.ResponseTimeSec = tmp.ResponseTimeSec m.UplinkBPS = tmp.UplinkBPS m.DownlinkBPS = tmp.DownlinkBPS + m.ActiveConnections = tmp.ActiveConnections + m.ConnectionsPerSec = tmp.ConnectionsPerSec return nil } @@ -104,6 +116,23 @@ type ResourceMonitorResponse struct { Data *MonitorValues `json:",omitempty"` // メトリクス } +// UnmarshalJSON JSONアンマーシャル(配列、オブジェクトが混在するためここで対応) +func (m *MonitorValues) UnmarshalJSON(data []byte) error { + targetData := strings.Replace(strings.Replace(string(data), " ", "", -1), "\n", "", -1) + if targetData == `[]` { + return nil + } + + tmp := map[string]*MonitorValue{} + if err := json.Unmarshal(data, &tmp); err != nil { + return err + } + + value := MonitorValues(tmp) + *m = value + return nil +} + // MonitorSummaryData メトリクスサマリー type MonitorSummaryData struct { Max float64 // 最大値 @@ -242,6 +271,16 @@ func (m *MonitorValues) FlattenUsedDisk2SizeValue() ([]FlatMonitorValue, error) return m.flattenValue(func(v *MonitorValue) *float64 { return v.UsedDisk2Size }) } +// FlattenBinlogUsedSizeKiBValue フラット化 バイナリログサイズ +func (m *MonitorValues) FlattenBinlogUsedSizeKiBValue() ([]FlatMonitorValue, error) { + return m.flattenValue(func(v *MonitorValue) *float64 { return v.BinlogUsedSizeKiB }) +} + +// FlattenDelayTimeSecValue フラット化 レプリケーション遅延時間 +func (m *MonitorValues) FlattenDelayTimeSecValue() ([]FlatMonitorValue, error) { + return m.flattenValue(func(v *MonitorValue) *float64 { return v.DelayTimeSec }) +} + // FlattenFreeDiskSizeValue フラット化 空きディスクサイズ(NFS) func (m *MonitorValues) FlattenFreeDiskSizeValue() ([]FlatMonitorValue, error) { return m.flattenValue(func(v *MonitorValue) *float64 { return v.FreeDiskSize }) @@ -262,6 +301,16 @@ func (m 
*MonitorValues) FlattenDownlinkBPSValue() ([]FlatMonitorValue, error) { return m.flattenValue(func(v *MonitorValue) *float64 { return v.DownlinkBPS }) } +// FlattenActiveConnections フラット化 アクティブコネクション +func (m *MonitorValues) FlattenActiveConnections() ([]FlatMonitorValue, error) { + return m.flattenValue(func(v *MonitorValue) *float64 { return v.ActiveConnections }) +} + +// FlattenConnectionsPerSec フラット化 秒間接続数 +func (m *MonitorValues) FlattenConnectionsPerSec() ([]FlatMonitorValue, error) { + return m.flattenValue(func(v *MonitorValue) *float64 { return v.ConnectionsPerSec }) +} + func (m *MonitorValues) flattenValue(f func(*MonitorValue) *float64) ([]FlatMonitorValue, error) { var res []FlatMonitorValue @@ -293,8 +342,10 @@ func (m *MonitorValue) HasValue() bool { m.TotalMemorySize, m.UsedMemorySize, m.TotalDisk1Size, m.UsedDisk1Size, m.TotalDisk2Size, m.UsedDisk2Size, + m.BinlogUsedSizeKiB, m.DelayTimeSec, m.FreeDiskSize, m.ResponseTimeSec, m.UplinkBPS, m.DownlinkBPS, + m.ActiveConnections, m.ConnectionsPerSec, } for _, v := range values { if v != nil { diff --git a/vendor/github.com/sacloud/libsacloud/sacloud/nfs.go b/vendor/github.com/sacloud/libsacloud/sacloud/nfs.go index 7d91a48ec..ee897e091 100644 --- a/vendor/github.com/sacloud/libsacloud/sacloud/nfs.go +++ b/vendor/github.com/sacloud/libsacloud/sacloud/nfs.go @@ -1,5 +1,9 @@ package sacloud +import ( + "encoding/json" +) + // NFS NFS type NFS struct { *Appliance // アプライアンス共通属性 @@ -11,47 +15,97 @@ type NFS struct { // NFSRemark リマーク type NFSRemark struct { *ApplianceRemarkBase - propPlanID + Plan *struct { + ID json.Number `json:",omitempty"` + } `json:",omitempty"` // プラン // TODO Zone //Zone *Resource //SourceAppliance *Resource // クローン元DB } +// SetRemarkPlanID プランID設定 +func (n NFSRemark) SetRemarkPlanID(planID int64) { + if n.Plan == nil { + n.Plan = &struct { + ID json.Number `json:",omitempty"` + }{} + } + n.Plan.ID = json.Number(planID) +} + // NFSSettings NFS設定リスト type NFSSettings struct { } -// 
NFSPlan NFSプラン +// NFSPlan プラン(HDD/SSD) type NFSPlan int var ( - // NFSPlan100G 100Gプラン - NFSPlan100G = NFSPlan(100) - // NFSPlan500G 500Gプラン - NFSPlan500G = NFSPlan(500) - // NFSPlan1T 1T(1024GB)プラン - NFSPlan1T = NFSPlan(1024 * 1) - // NFSPlan2T 2T(2048GB)プラン - NFSPlan2T = NFSPlan(1024 * 2) - // NFSPlan4T 4T(4096GB)プラン - NFSPlan4T = NFSPlan(1024 * 4) + // NFSPlanHDD 標準プラン(HDD) + NFSPlanHDD = NFSPlan(1) + // NFSPlanSSD SSHプラン + NFSPlanSSD = NFSPlan(2) ) -// AllowNFSPlans 指定可能なNFSプラン -func AllowNFSPlans() []int { +// String NFSプランの文字列表現 +func (p NFSPlan) String() string { + switch p { + case NFSPlanHDD: + return "HDD" + case NFSPlanSSD: + return "SSD" + default: + return "" + } +} + +// NFSSize NFSサイズ +type NFSSize int + +var ( + // NFSSize100G 100Gプラン + NFSSize100G = NFSSize(100) + // NFSSize500G 500Gプラン + NFSSize500G = NFSSize(500) + // NFSSize1T 1T(1024GB)プラン + NFSSize1T = NFSSize(1024 * 1) + // NFSSize2T 2T(2048GB)プラン + NFSSize2T = NFSSize(1024 * 2) + // NFSSize4T 4T(4096GB)プラン + NFSSize4T = NFSSize(1024 * 4) + // NFSSize8T 8TBプラン + NFSSize8T = NFSSize(1024 * 8) + // NFSSize12T 12TBプラン + NFSSize12T = NFSSize(1024 * 12) +) + +// AllowNFSNormalPlanSizes 指定可能なNFSサイズ(標準プラン) +func AllowNFSNormalPlanSizes() []int { return []int{ - int(NFSPlan100G), - int(NFSPlan500G), - int(NFSPlan1T), - int(NFSPlan2T), - int(NFSPlan4T), + int(NFSSize100G), + int(NFSSize500G), + int(NFSSize1T), + int(NFSSize2T), + int(NFSSize4T), + int(NFSSize8T), + int(NFSSize12T), + } +} + +// AllowNFSSSDPlanSizes 指定可能なNFSサイズ(SSDプラン) +func AllowNFSSSDPlanSizes() []int { + return []int{ + int(NFSSize100G), + int(NFSSize500G), + int(NFSSize1T), + int(NFSSize2T), + int(NFSSize4T), } } // CreateNFSValue NFS作成用パラメーター type CreateNFSValue struct { SwitchID string // 接続先スイッチID - Plan NFSPlan // プラン IPAddress string // IPアドレス MaskLen int // ネットワークマスク長 DefaultRoute string // デフォルトルート @@ -62,27 +116,16 @@ type CreateNFSValue struct { SourceAppliance *Resource // クローン元NFS } -// NewCreateNFSValue NFS作成用パラメーター 
-func NewCreateNFSValue() *CreateNFSValue { - return &CreateNFSValue{ - Plan: NFSPlan100G, - } -} - // NewNFS NFS作成(冗長化なし) func NewNFS(values *CreateNFSValue) *NFS { - if int(values.Plan) == 0 { - values.Plan = NFSPlan100G - } - return &NFS{ Appliance: &Appliance{ Class: "nfs", propName: propName{Name: values.Name}, propDescription: propDescription{Description: values.Description}, propTags: propTags{Tags: values.Tags}, - propPlanID: propPlanID{Plan: &Resource{ID: int64(values.Plan)}}, + //propPlanID: propPlanID{Plan: &Resource{ID: int64(values.Plan)}}, propIcon: propIcon{ &Icon{ Resource: values.Icon, @@ -99,12 +142,107 @@ func NewNFS(values *CreateNFSValue) *NFS { DefaultRoute: values.DefaultRoute, }, Servers: []interface{}{ - map[string]string{"IPAddress": values.IPAddress}, + map[string]interface{}{"IPAddress": values.IPAddress}, }, }, - propPlanID: propPlanID{Plan: &Resource{ID: int64(values.Plan)}}, - //SourceAppliance: values.SourceAppliance, + //propPlanID: propPlanID{Plan: &Resource{ID: int64(values.Plan)}}, }, } } + +// IPAddress IPアドレスを取得 +func (n *NFS) IPAddress() string { + if len(n.Remark.Servers) < 1 { + return "" + } + + v, ok := n.Remark.Servers[0].(map[string]interface{}) + if !ok { + return "" + } + + if ip, ok := v["IPAddress"]; ok { + return ip.(string) + } + return "" +} + +// NetworkMaskLen ネットワークマスク長を取得 +func (n *NFS) NetworkMaskLen() int { + if n.Remark.Network == nil { + return -1 + } + return n.Remark.Network.NetworkMaskLen +} + +// DefaultRoute デフォルトゲートウェイを取得 +func (n *NFS) DefaultRoute() string { + if n.Remark.Network == nil { + return "" + } + return n.Remark.Network.DefaultRoute +} + +// NFSPlans NFSプラン +type NFSPlans struct { + HDD []NFSPlanValue + SSD []NFSPlanValue +} + +// FindPlanID プランとサイズからプランIDを取得 +func (p NFSPlans) FindPlanID(plan NFSPlan, size NFSSize) int64 { + var plans []NFSPlanValue + switch plan { + case NFSPlanHDD: + plans = p.HDD + case NFSPlanSSD: + plans = p.SSD + default: + return -1 + } + + for _, plan := range 
plans { + if plan.Availability == "available" && plan.Size == int(size) { + res, err := plan.PlanID.Int64() + if err != nil { + return -1 + } + return res + } + } + + return -1 +} + +// FindByPlanID プランIDから該当プランを取得 +func (p NFSPlans) FindByPlanID(planID int64) (NFSPlan, *NFSPlanValue) { + + for _, plan := range p.SSD { + id, err := plan.PlanID.Int64() + if err != nil { + continue + } + if id == planID { + return NFSPlanSSD, &plan + } + } + + for _, plan := range p.HDD { + id, err := plan.PlanID.Int64() + if err != nil { + continue + } + if id == planID { + return NFSPlanHDD, &plan + } + } + return NFSPlan(-1), nil +} + +// NFSPlanValue NFSプラン +type NFSPlanValue struct { + Size int `json:"size"` + Availability string `json:"availability"` + PlanID json.Number `json:"planId"` +} diff --git a/vendor/github.com/sacloud/libsacloud/sacloud/ostype/archive_ostype.go b/vendor/github.com/sacloud/libsacloud/sacloud/ostype/archive_ostype.go index a53fd1b2c..29d754857 100644 --- a/vendor/github.com/sacloud/libsacloud/sacloud/ostype/archive_ostype.go +++ b/vendor/github.com/sacloud/libsacloud/sacloud/ostype/archive_ostype.go @@ -27,6 +27,10 @@ const ( SophosUTM // FreeBSD OS種別:FreeBSD FreeBSD + // Netwiser OS種別: Netwiser Virtual Edition + Netwiser + // OPNsense OS種別: OPNsense + OPNsense // Windows2012 OS種別:Windows Server 2012 R2 Datacenter Edition Windows2012 // Windows2012RDS OS種別:Windows Server 2012 R2 for RDS @@ -41,10 +45,16 @@ const ( Windows2016RDSOffice // Windows2016SQLServerWeb OS種別:Windows Server 2016 SQLServer(Web) Windows2016SQLServerWeb - // Windows2016SQLServerStandard OS種別:Windows Server 2016 SQLServer(Standard) + // Windows2016SQLServerStandard OS種別:Windows Server 2016 SQLServer 2016(Standard) Windows2016SQLServerStandard + // Windows2016SQLServer2017Standard OS種別:Windows Server 2016 SQLServer 2017(Standard) + Windows2016SQLServer2017Standard // Windows2016SQLServerStandardAll OS種別:Windows Server 2016 SQLServer(Standard) + RDS + Office 
Windows2016SQLServerStandardAll + // Windows2016SQLServer2017StandardAll OS種別:Windows Server 2016 SQLServer 2017(Standard) + RDS + Office + Windows2016SQLServer2017StandardAll + // Windows2019 OS種別:Windows Server 2019 Datacenter Edition + Windows2019 // Custom OS種別:カスタム Custom ) @@ -53,9 +63,12 @@ const ( var OSTypeShortNames = []string{ "centos", "centos6", "ubuntu", "debian", "vyos", "coreos", "rancheros", "kusanagi", "sophos-utm", "freebsd", + "netwiser", "opnsense", "windows2012", "windows2012-rds", "windows2012-rds-office", "windows2016", "windows2016-rds", "windows2016-rds-office", "windows2016-sql-web", "windows2016-sql-standard", "windows2016-sql-standard-all", + "windows2016-sql2017-standard", "windows2016-sql2017-standard-all", + "windows2019", } // IsWindows Windowsか @@ -63,7 +76,9 @@ func (o ArchiveOSTypes) IsWindows() bool { switch o { case Windows2012, Windows2012RDS, Windows2012RDSOffice, Windows2016, Windows2016RDS, Windows2016RDSOffice, - Windows2016SQLServerWeb, Windows2016SQLServerStandard, Windows2016SQLServerStandardAll: + Windows2016SQLServerWeb, Windows2016SQLServerStandard, Windows2016SQLServerStandardAll, + Windows2016SQLServer2017Standard, Windows2016SQLServer2017StandardAll, + Windows2019: return true default: return false @@ -103,6 +118,10 @@ func StrToOSType(osType string) ArchiveOSTypes { return SophosUTM case "freebsd": return FreeBSD + case "netwiser": + return Netwiser + case "opnsense": + return OPNsense case "windows2012": return Windows2012 case "windows2012-rds": @@ -119,8 +138,14 @@ func StrToOSType(osType string) ArchiveOSTypes { return Windows2016SQLServerWeb case "windows2016-sql-standard": return Windows2016SQLServerStandard + case "windows2016-sql2017-standard": + return Windows2016SQLServer2017Standard case "windows2016-sql-standard-all": return Windows2016SQLServerStandardAll + case "windows2016-sql2017-standard-all": + return Windows2016SQLServer2017StandardAll + case "windows2019": + return Windows2019 default: return 
Custom } diff --git a/vendor/github.com/sacloud/libsacloud/sacloud/ostype/archiveostypes_string.go b/vendor/github.com/sacloud/libsacloud/sacloud/ostype/archiveostypes_string.go index 74656b860..77d31e273 100644 --- a/vendor/github.com/sacloud/libsacloud/sacloud/ostype/archiveostypes_string.go +++ b/vendor/github.com/sacloud/libsacloud/sacloud/ostype/archiveostypes_string.go @@ -4,9 +4,9 @@ package ostype import "strconv" -const _ArchiveOSTypes_name = "CentOSCentOS6UbuntuDebianVyOSCoreOSRancherOSKusanagiSophosUTMFreeBSDWindows2012Windows2012RDSWindows2012RDSOfficeWindows2016Windows2016RDSWindows2016RDSOfficeWindows2016SQLServerWebWindows2016SQLServerStandardWindows2016SQLServerStandardAllCustom" +const _ArchiveOSTypes_name = "CentOSCentOS6UbuntuDebianVyOSCoreOSRancherOSKusanagiSophosUTMFreeBSDNetwiserOPNsenseWindows2012Windows2012RDSWindows2012RDSOfficeWindows2016Windows2016RDSWindows2016RDSOfficeWindows2016SQLServerWebWindows2016SQLServerStandardWindows2016SQLServer2017StandardWindows2016SQLServerStandardAllWindows2016SQLServer2017StandardAllWindows2019Custom" -var _ArchiveOSTypes_index = [...]uint8{0, 6, 13, 19, 25, 29, 35, 44, 52, 61, 68, 79, 93, 113, 124, 138, 158, 181, 209, 240, 246} +var _ArchiveOSTypes_index = [...]uint16{0, 6, 13, 19, 25, 29, 35, 44, 52, 61, 68, 76, 84, 95, 109, 129, 140, 154, 174, 197, 225, 257, 288, 323, 334, 340} func (i ArchiveOSTypes) String() string { if i < 0 || i >= ArchiveOSTypes(len(_ArchiveOSTypes_index)-1) { diff --git a/vendor/github.com/sacloud/libsacloud/sacloud/product_server.go b/vendor/github.com/sacloud/libsacloud/sacloud/product_server.go index 9427ec0dd..593c342f2 100644 --- a/vendor/github.com/sacloud/libsacloud/sacloud/product_server.go +++ b/vendor/github.com/sacloud/libsacloud/sacloud/product_server.go @@ -2,11 +2,12 @@ package sacloud // ProductServer サーバープラン type ProductServer struct { - *Resource // ID - propName // 名称 - propDescription // 説明 - propAvailability // 有功状態 - propCPU // CPUコア数 - propMemoryMB // 
メモリサイズ(MB単位) - propServiceClass // サービスクラス + *Resource // ID + propName // 名称 + propDescription // 説明 + propAvailability // 有功状態 + propCPU // CPUコア数 + propMemoryMB // メモリサイズ(MB単位) + propServiceClass // サービスクラス + Generation PlanGenerations `json:",omitempty"` // 世代 } diff --git a/vendor/github.com/sacloud/libsacloud/sacloud/prop_memory.go b/vendor/github.com/sacloud/libsacloud/sacloud/prop_memory.go index 5c503a33b..da08b53b4 100644 --- a/vendor/github.com/sacloud/libsacloud/sacloud/prop_memory.go +++ b/vendor/github.com/sacloud/libsacloud/sacloud/prop_memory.go @@ -17,3 +17,8 @@ func (p *propMemoryMB) GetMemoryGB() int { } return p.MemoryMB / 1024 } + +// SetMemoryGB サイズ(GB単位) 設定 +func (p *propMemoryMB) SetMemoryGB(memoryGB int) { + p.MemoryMB = memoryGB * 1024 +} diff --git a/vendor/github.com/sacloud/libsacloud/sacloud/prop_server_plan.go b/vendor/github.com/sacloud/libsacloud/sacloud/prop_server_plan.go index b5fa34540..3b33862fe 100644 --- a/vendor/github.com/sacloud/libsacloud/sacloud/prop_server_plan.go +++ b/vendor/github.com/sacloud/libsacloud/sacloud/prop_server_plan.go @@ -23,6 +23,15 @@ func (p *propServerPlan) SetServerPlanByID(planID string) { p.ServerPlan.Resource = NewResourceByStringID(planID) } +// SetServerPlanByValue サーバープラン設定(値指定) +func (p *propServerPlan) SetServerPlanByValue(cpu int, memoryGB int, gen PlanGenerations) { + plan := &ProductServer{} + plan.CPU = cpu + plan.SetMemoryGB(memoryGB) + plan.Generation = gen + p.ServerPlan = plan +} + // GetCPU CPUコア数 取得 func (p *propServerPlan) GetCPU() int { if p.ServerPlan == nil { @@ -49,3 +58,7 @@ func (p *propServerPlan) GetMemoryGB() int { return p.ServerPlan.GetMemoryGB() } + +func (p *propServerPlan) SetMemoryGB(memoryGB int) { + p.ServerPlan.SetMemoryGB(memoryGB) +} diff --git a/vendor/github.com/sacloud/libsacloud/sacloud/prop_wait_disk_migration.go b/vendor/github.com/sacloud/libsacloud/sacloud/prop_wait_disk_migration.go new file mode 100644 index 000000000..ed2da17b4 --- /dev/null +++ 
b/vendor/github.com/sacloud/libsacloud/sacloud/prop_wait_disk_migration.go @@ -0,0 +1,16 @@ +package sacloud + +// propWaitDiskMigration ディスク作成待ちフラグ内包型 +type propWaitDiskMigration struct { + WaitDiskMigration bool `json:",omitempty"` +} + +// GetWaitDiskMigration ディスク作成待ちフラグ 取得 +func (p *propWaitDiskMigration) GetWaitDiskMigration() bool { + return p.WaitDiskMigration +} + +// SetWaitDiskMigration ディスク作成待ちフラグ 設定 +func (p *propWaitDiskMigration) SetWaitDiskMigration(f bool) { + p.WaitDiskMigration = f +} diff --git a/vendor/github.com/sacloud/libsacloud/sacloud/proxylb.go b/vendor/github.com/sacloud/libsacloud/sacloud/proxylb.go new file mode 100644 index 000000000..64523e6a8 --- /dev/null +++ b/vendor/github.com/sacloud/libsacloud/sacloud/proxylb.go @@ -0,0 +1,517 @@ +package sacloud + +import ( + "crypto/x509" + "encoding/json" + "encoding/pem" + "fmt" + "strconv" + "strings" + "time" +) + +// ProxyLB ProxyLB(CommonServiceItem) +type ProxyLB struct { + *Resource // ID + propName // 名称 + propDescription // 説明 + propServiceClass // サービスクラス + propIcon // アイコン + propTags // タグ + propCreatedAt // 作成日時 + propModifiedAt // 変更日時 + propAvailability // 有効状態 + + Status *ProxyLBStatus `json:",omitempty"` // ステータス + Provider ProxyLBProvider `json:",omitempty"` // プロバイダ + Settings ProxyLBSettings `json:",omitempty"` // ProxyLB設定 + +} + +// ProxyLBSettings ProxyLB設定 +type ProxyLBSettings struct { + ProxyLB ProxyLBSetting `json:",omitempty"` // ProxyLB ProxyLBエントリー +} + +// ProxyLBStatus ProxyLBステータス +type ProxyLBStatus struct { + FQDN string `json:",omitempty"` // 割り当てられたFQDN(site-*******.proxylb?.sakura.ne.jp) UseVIPFailoverがtrueの場合のみ有効 + VirtualIPAddress string `json:",omitempty"` // 割り当てられたVIP UseVIPFailoverがfalseの場合のみ有効 + ProxyNetworks []string `json:",omitempty"` // プロキシ元ネットワークアドレス(CIDR) + UseVIPFailover bool // VIPフェイルオーバ +} + +// ProxyLBProvider プロバイダ +type ProxyLBProvider struct { + Class string `json:",omitempty"` // クラス +} + +// CreateNewProxyLB ProxyLB作成 +func 
CreateNewProxyLB(name string) *ProxyLB { + return &ProxyLB{ + Resource: &Resource{}, + propName: propName{Name: name}, + Provider: ProxyLBProvider{ + Class: "proxylb", + }, + Settings: ProxyLBSettings{ + ProxyLB: ProxyLBSetting{ + HealthCheck: defaultProxyLBHealthCheck, + SorryServer: ProxyLBSorryServer{}, + Servers: []ProxyLBServer{}, + }, + }, + } +} + +// ProxyLBPlan ProxyLBプラン +type ProxyLBPlan int + +var ( + // ProxyLBPlan1000 1,000cpsプラン + ProxyLBPlan1000 = ProxyLBPlan(1000) + // ProxyLBPlan5000 5,000cpsプラン + ProxyLBPlan5000 = ProxyLBPlan(5000) + // ProxyLBPlan10000 10,000cpsプラン + ProxyLBPlan10000 = ProxyLBPlan(10000) + // ProxyLBPlan50000 50,000cpsプラン + ProxyLBPlan50000 = ProxyLBPlan(50000) + // ProxyLBPlan100000 100,000cpsプラン + ProxyLBPlan100000 = ProxyLBPlan(100000) +) + +// AllowProxyLBPlans 有効なプランIDリスト +var AllowProxyLBPlans = []int{ + int(ProxyLBPlan1000), + int(ProxyLBPlan5000), + int(ProxyLBPlan10000), + int(ProxyLBPlan50000), + int(ProxyLBPlan100000), +} + +// GetPlan プラン取得(デフォルト: 1000cps) +func (p *ProxyLB) GetPlan() ProxyLBPlan { + classes := strings.Split(p.ServiceClass, "/") + class, err := strconv.Atoi(classes[len(classes)-1]) + if err != nil { + return ProxyLBPlan1000 + } + return ProxyLBPlan(class) +} + +// SetPlan プラン指定 +func (p *ProxyLB) SetPlan(plan ProxyLBPlan) { + p.ServiceClass = fmt.Sprintf("cloud/proxylb/plain/%d", plan) +} + +// SetHTTPHealthCheck HTTPヘルスチェック 設定 +func (p *ProxyLB) SetHTTPHealthCheck(hostHeader, path string, delayLoop int) { + if delayLoop <= 0 { + delayLoop = 10 + } + + p.Settings.ProxyLB.HealthCheck.Protocol = "http" + p.Settings.ProxyLB.HealthCheck.Host = hostHeader + p.Settings.ProxyLB.HealthCheck.Path = path + p.Settings.ProxyLB.HealthCheck.DelayLoop = delayLoop +} + +// SetTCPHealthCheck TCPヘルスチェック 設定 +func (p *ProxyLB) SetTCPHealthCheck(delayLoop int) { + if delayLoop <= 0 { + delayLoop = 10 + } + + p.Settings.ProxyLB.HealthCheck.Protocol = "tcp" + p.Settings.ProxyLB.HealthCheck.Host = "" + 
p.Settings.ProxyLB.HealthCheck.Path = "" + p.Settings.ProxyLB.HealthCheck.DelayLoop = delayLoop +} + +// SetSorryServer ソーリーサーバ 設定 +func (p *ProxyLB) SetSorryServer(ipaddress string, port int) { + var pt *int + if port > 0 { + pt = &port + } + p.Settings.ProxyLB.SorryServer = ProxyLBSorryServer{ + IPAddress: ipaddress, + Port: pt, + } +} + +// ClearSorryServer ソーリーサーバ クリア +func (p *ProxyLB) ClearSorryServer() { + p.SetSorryServer("", 0) +} + +// HasProxyLBServer ProxyLB配下にサーバーを保持しているか判定 +func (p *ProxyLB) HasProxyLBServer() bool { + return len(p.Settings.ProxyLB.Servers) > 0 +} + +// ClearProxyLBServer ProxyLB配下のサーバーをクリア +func (p *ProxyLB) ClearProxyLBServer() { + p.Settings.ProxyLB.Servers = []ProxyLBServer{} +} + +// AddBindPort バインドポート追加 +func (p *ProxyLB) AddBindPort(mode string, port int) { + p.Settings.ProxyLB.AddBindPort(mode, port) +} + +// DeleteBindPort バインドポート削除 +func (p *ProxyLB) DeleteBindPort(mode string, port int) { + p.Settings.ProxyLB.DeleteBindPort(mode, port) +} + +// ClearBindPorts バインドポート クリア +func (p *ProxyLB) ClearBindPorts() { + p.Settings.ProxyLB.BindPorts = []*ProxyLBBindPorts{} +} + +// AddServer ProxyLB配下のサーバーを追加 +func (p *ProxyLB) AddServer(ip string, port int, enabled bool) { + p.Settings.ProxyLB.AddServer(ip, port, enabled) +} + +// DeleteServer ProxyLB配下のサーバーを削除 +func (p *ProxyLB) DeleteServer(ip string, port int) { + p.Settings.ProxyLB.DeleteServer(ip, port) +} + +// ProxyLBSetting ProxyLBセッティング +type ProxyLBSetting struct { + HealthCheck ProxyLBHealthCheck `json:",omitempty"` // ヘルスチェック + SorryServer ProxyLBSorryServer `json:",omitempty"` // ソーリーサーバー + BindPorts []*ProxyLBBindPorts `json:",omitempty"` // プロキシ方式(プロトコル&ポート) + Servers []ProxyLBServer `json:",omitempty"` // サーバー +} + +// ProxyLBSorryServer ソーリーサーバ +type ProxyLBSorryServer struct { + IPAddress string // IPアドレス + Port *int // ポート +} + +// AddBindPort バインドポート追加 +func (s *ProxyLBSetting) AddBindPort(mode string, port int) { + var isExist bool + for i := range s.BindPorts { 
+ if s.BindPorts[i].ProxyMode == mode && s.BindPorts[i].Port == port { + isExist = true + } + } + + if !isExist { + s.BindPorts = append(s.BindPorts, &ProxyLBBindPorts{ + ProxyMode: mode, + Port: port, + }) + } +} + +// DeleteBindPort バインドポート削除 +func (s *ProxyLBSetting) DeleteBindPort(mode string, port int) { + var res []*ProxyLBBindPorts + for i := range s.BindPorts { + if s.BindPorts[i].ProxyMode != mode || s.BindPorts[i].Port != port { + res = append(res, s.BindPorts[i]) + } + } + s.BindPorts = res +} + +// AddServer ProxyLB配下のサーバーを追加 +func (s *ProxyLBSetting) AddServer(ip string, port int, enabled bool) { + var record ProxyLBServer + var isExist = false + for i := range s.Servers { + if s.Servers[i].IPAddress == ip && s.Servers[i].Port == port { + isExist = true + s.Servers[i].Enabled = enabled + } + } + + if !isExist { + record = ProxyLBServer{ + IPAddress: ip, + Port: port, + Enabled: enabled, + } + s.Servers = append(s.Servers, record) + } +} + +// DeleteServer ProxyLB配下のサーバーを削除 +func (s *ProxyLBSetting) DeleteServer(ip string, port int) { + var res []ProxyLBServer + for i := range s.Servers { + if s.Servers[i].IPAddress != ip || s.Servers[i].Port != port { + res = append(res, s.Servers[i]) + } + } + + s.Servers = res +} + +// AllowProxyLBBindModes プロキシ方式 +var AllowProxyLBBindModes = []string{"http", "https"} + +// ProxyLBBindPorts プロキシ方式 +type ProxyLBBindPorts struct { + ProxyMode string `json:",omitempty"` // モード(プロトコル) + Port int `json:",omitempty"` // ポート +} + +// ProxyLBServer ProxyLB配下のサーバー +type ProxyLBServer struct { + IPAddress string `json:",omitempty"` // IPアドレス + Port int `json:",omitempty"` // ポート + Enabled bool `json:",omitempty"` // 有効/無効 +} + +// NewProxyLBServer ProxyLB配下のサーバ作成 +func NewProxyLBServer(ipaddress string, port int) *ProxyLBServer { + return &ProxyLBServer{ + IPAddress: ipaddress, + Port: port, + Enabled: true, + } +} + +// AllowProxyLBHealthCheckProtocols プロキシLBで利用できるヘルスチェックプロトコル +var AllowProxyLBHealthCheckProtocols = 
[]string{"http", "tcp"} + +// ProxyLBHealthCheck ヘルスチェック +type ProxyLBHealthCheck struct { + Protocol string `json:",omitempty"` // プロトコル + Host string `json:",omitempty"` // 対象ホスト + Path string `json:",omitempty"` // HTTPの場合のリクエストパス + DelayLoop int `json:",omitempty"` // 監視間隔 + +} + +var defaultProxyLBHealthCheck = ProxyLBHealthCheck{ + Protocol: "http", + Host: "", + Path: "/", + DelayLoop: 10, +} + +// ProxyLBAdditionalCerts additional certificates +type ProxyLBAdditionalCerts []*ProxyLBCertificate + +// ProxyLBCertificates ProxyLBのSSL証明書 +type ProxyLBCertificates struct { + ServerCertificate string // サーバ証明書 + IntermediateCertificate string // 中間証明書 + PrivateKey string // 秘密鍵 + CertificateEndDate time.Time `json:",omitempty"` // 有効期限 + CertificateCommonName string `json:",omitempty"` // CommonName + AdditionalCerts ProxyLBAdditionalCerts +} + +// UnmarshalJSON UnmarshalJSON(AdditionalCertsが空の場合に空文字を返す問題への対応) +func (p *ProxyLBAdditionalCerts) UnmarshalJSON(data []byte) error { + targetData := strings.Replace(strings.Replace(string(data), " ", "", -1), "\n", "", -1) + if targetData == `` { + return nil + } + + var certs []*ProxyLBCertificate + if err := json.Unmarshal(data, &certs); err != nil { + return err + } + + *p = certs + return nil +} + +// SetPrimaryCert PrimaryCertを設定 +func (p *ProxyLBCertificates) SetPrimaryCert(cert *ProxyLBCertificate) { + p.ServerCertificate = cert.ServerCertificate + p.IntermediateCertificate = cert.IntermediateCertificate + p.PrivateKey = cert.PrivateKey + p.CertificateEndDate = cert.CertificateEndDate + p.CertificateCommonName = cert.CertificateCommonName +} + +// SetPrimaryCertValue PrimaryCertを設定 +func (p *ProxyLBCertificates) SetPrimaryCertValue(serverCert, intermediateCert, privateKey string) { + p.ServerCertificate = serverCert + p.IntermediateCertificate = intermediateCert + p.PrivateKey = privateKey +} + +// AddAdditionalCert AdditionalCertを追加 +func (p *ProxyLBCertificates) AddAdditionalCert(serverCert, intermediateCert, 
privateKey string) { + p.AdditionalCerts = append(p.AdditionalCerts, &ProxyLBCertificate{ + ServerCertificate: serverCert, + IntermediateCertificate: intermediateCert, + PrivateKey: privateKey, + }) +} + +// RemoveAdditionalCertAt 指定のインデックスを持つAdditionalCertを削除 +func (p *ProxyLBCertificates) RemoveAdditionalCertAt(index int) { + var certs []*ProxyLBCertificate + for i, cert := range p.AdditionalCerts { + if i != index { + certs = append(certs, cert) + } + } + p.AdditionalCerts = certs +} + +// RemoveAdditionalCert 指定の内容を持つAdditionalCertを削除 +func (p *ProxyLBCertificates) RemoveAdditionalCert(serverCert, intermediateCert, privateKey string) { + var certs []*ProxyLBCertificate + for _, cert := range p.AdditionalCerts { + if !(cert.ServerCertificate == serverCert && cert.IntermediateCertificate == intermediateCert && cert.PrivateKey == privateKey) { + certs = append(certs, cert) + } + } + p.AdditionalCerts = certs +} + +// RemoveAdditionalCerts AdditionalCertsを全て削除 +func (p *ProxyLBCertificates) RemoveAdditionalCerts() { + p.AdditionalCerts = []*ProxyLBCertificate{} +} + +// UnmarshalJSON UnmarshalJSON(CertificateEndDateのtime.TimeへのUnmarshal対応) +func (p *ProxyLBCertificates) UnmarshalJSON(data []byte) error { + var tmp map[string]interface{} + if err := json.Unmarshal(data, &tmp); err != nil { + return err + } + + p.ServerCertificate = tmp["ServerCertificate"].(string) + p.IntermediateCertificate = tmp["IntermediateCertificate"].(string) + p.PrivateKey = tmp["PrivateKey"].(string) + p.CertificateCommonName = tmp["CertificateCommonName"].(string) + endDate := tmp["CertificateEndDate"].(string) + if endDate != "" { + date, err := time.Parse("Jan _2 15:04:05 2006 MST", endDate) + if err != nil { + return err + } + p.CertificateEndDate = date + } + + if _, ok := tmp["AdditionalCerts"].(string); !ok { + rawCerts, err := json.Marshal(tmp["AdditionalCerts"]) + if err != nil { + return err + } + var additionalCerts ProxyLBAdditionalCerts + if err := json.Unmarshal(rawCerts, 
&additionalCerts); err != nil { + return err + } + p.AdditionalCerts = additionalCerts + } + + return nil +} + +// ParseServerCertificate サーバ証明書のパース +func (p *ProxyLBCertificates) ParseServerCertificate() (*x509.Certificate, error) { + cert, e := p.parseCertificate(p.ServerCertificate) + if e != nil { + return nil, e + } + return cert, nil +} + +// ParseIntermediateCertificate 中間証明書のパース +func (p *ProxyLBCertificates) ParseIntermediateCertificate() (*x509.Certificate, error) { + cert, e := p.parseCertificate(p.IntermediateCertificate) + if e != nil { + return nil, e + } + return cert, nil +} + +func (p *ProxyLBCertificates) parseCertificate(certPEM string) (*x509.Certificate, error) { + block, _ := pem.Decode([]byte(certPEM)) + if block != nil { + return x509.ParseCertificate(block.Bytes) + } + return nil, fmt.Errorf("can't decode certificate") +} + +// ProxyLBCertificate ProxyLBのSSL証明書詳細 +type ProxyLBCertificate struct { + ServerCertificate string // サーバ証明書 + IntermediateCertificate string // 中間証明書 + PrivateKey string // 秘密鍵 + CertificateEndDate time.Time `json:",omitempty"` // 有効期限 + CertificateCommonName string `json:",omitempty"` // CommonName +} + +// UnmarshalJSON UnmarshalJSON(CertificateEndDateのtime.TimeへのUnmarshal対応) +func (p *ProxyLBCertificate) UnmarshalJSON(data []byte) error { + var tmp map[string]interface{} + if err := json.Unmarshal(data, &tmp); err != nil { + return err + } + + p.ServerCertificate = tmp["ServerCertificate"].(string) + p.IntermediateCertificate = tmp["IntermediateCertificate"].(string) + p.PrivateKey = tmp["PrivateKey"].(string) + p.CertificateCommonName = tmp["CertificateCommonName"].(string) + endDate := tmp["CertificateEndDate"].(string) + if endDate != "" { + date, err := time.Parse("Jan _2 15:04:05 2006 MST", endDate) + if err != nil { + return err + } + p.CertificateEndDate = date + } + + return nil +} + +// ParseServerCertificate サーバ証明書のパース +func (p *ProxyLBCertificate) ParseServerCertificate() (*x509.Certificate, error) { + 
cert, e := p.parseCertificate(p.ServerCertificate) + if e != nil { + return nil, e + } + return cert, nil +} + +// ParseIntermediateCertificate 中間証明書のパース +func (p *ProxyLBCertificate) ParseIntermediateCertificate() (*x509.Certificate, error) { + cert, e := p.parseCertificate(p.IntermediateCertificate) + if e != nil { + return nil, e + } + return cert, nil +} + +func (p *ProxyLBCertificate) parseCertificate(certPEM string) (*x509.Certificate, error) { + block, _ := pem.Decode([]byte(certPEM)) + if block != nil { + return x509.ParseCertificate(block.Bytes) + } + return nil, fmt.Errorf("can't decode certificate") +} + +// ProxyLBHealth ProxyLBのヘルスチェック戻り値 +type ProxyLBHealth struct { + ActiveConn int // アクティブなコネクション数 + CPS int // 秒あたりコネクション数 + Servers []*ProxyLBHealthServer // 実サーバのステータス + CurrentVIP string // 現在のVIP +} + +// ProxyLBHealthServer ProxyLBの実サーバのステータス +type ProxyLBHealthServer struct { + ActiveConn int // アクティブなコネクション数 + Status string // ステータス(UP or DOWN) + IPAddress string // IPアドレス + Port string // ポート + CPS int // 秒あたりコネクション数 +} diff --git a/vendor/github.com/sacloud/libsacloud/sacloud/server.go b/vendor/github.com/sacloud/libsacloud/sacloud/server.go index 37ed43b64..1b699152c 100644 --- a/vendor/github.com/sacloud/libsacloud/sacloud/server.go +++ b/vendor/github.com/sacloud/libsacloud/sacloud/server.go @@ -21,6 +21,7 @@ type Server struct { propIcon // アイコン propTags // タグ propCreatedAt // 作成日時 + propWaitDiskMigration // サーバ作成時のディスク作成待ち } // DNSServers サーバの所属するリージョンの推奨ネームサーバリスト @@ -43,10 +44,13 @@ func (s *Server) IPAddress() string { // Gateway デフォルトゲートウェイアドレス func (s *Server) Gateway() string { - if len(s.Interfaces) == 0 || s.Interfaces[0].Switch == nil || s.Interfaces[0].Switch.UserSubnet == nil { + if len(s.Interfaces) == 0 || s.Interfaces[0].Switch == nil { return "" } - return s.Interfaces[0].Switch.UserSubnet.DefaultRoute + if s.Interfaces[0].Switch.UserSubnet != nil { + return s.Interfaces[0].Switch.UserSubnet.DefaultRoute + } + return 
s.Interfaces[0].Switch.Subnet.DefaultRoute } // DefaultRoute デフォルトゲートウェイアドレス(Gatewayのエイリアス) @@ -56,10 +60,13 @@ func (s *Server) DefaultRoute() string { // NetworkMaskLen サーバの1番目のNIC(eth0)のネットワークマスク長 func (s *Server) NetworkMaskLen() int { - if len(s.Interfaces) == 0 || s.Interfaces[0].Switch == nil || s.Interfaces[0].Switch.UserSubnet == nil { + if len(s.Interfaces) == 0 || s.Interfaces[0].Switch == nil { return 0 } - return s.Interfaces[0].Switch.UserSubnet.NetworkMaskLen + if s.Interfaces[0].Switch.UserSubnet != nil { + return s.Interfaces[0].Switch.UserSubnet.NetworkMaskLen + } + return s.Interfaces[0].Switch.Subnet.NetworkMaskLen } // NetworkAddress サーバの1番目のNIC(eth0)のネットワークアドレス @@ -79,6 +86,119 @@ func (s *Server) CIDRIPAddress() string { return "" } +// UpstreamType 1番目(0番目)のNICの上流ネットワーク種別 +func (s *Server) UpstreamType() EUpstreamNetworkType { + return s.UpstreamTypeAt(0) +} + +// UpstreamTypeAt 指定インデックスのNICの上流ネットワーク種別 +func (s *Server) UpstreamTypeAt(index int) EUpstreamNetworkType { + if len(s.Interfaces) <= index { + return EUpstreamNetworkUnknown + } + return s.Interfaces[index].UpstreamType() +} + +// SwitchID 上流のスイッチのID +// +// NICがない、上流スイッチが見つからない、上流が共有セグメントの場合は-1を返す +func (s *Server) SwitchID() int64 { + return s.SwitchIDAt(0) +} + +// SwitchIDAt 上流ネットワークのスイッチのID +// +// NICがない、上流スイッチが見つからない、上流が共有セグメントの場合は-1を返す +func (s *Server) SwitchIDAt(index int) int64 { + if len(s.Interfaces) <= index { + return -1 + } + + nic := s.Interfaces[index] + if nic.Switch == nil || nic.Switch.Scope == ESCopeShared { + return -1 + } + return nic.Switch.ID +} + +// SwitchName 上流のスイッチのID +// +// NICがない、上流スイッチが見つからない、上流が共有セグメントの場合は空文字を返す +func (s *Server) SwitchName() string { + return s.SwitchNameAt(0) +} + +// SwitchNameAt 上流ネットワークのスイッチのID +// +// NICがない、上流スイッチが見つからない、上流が共有セグメントの場合は空文字を返す +func (s *Server) SwitchNameAt(index int) string { + if len(s.Interfaces) <= index { + return "" + } + + nic := s.Interfaces[index] + if nic.Switch == nil || nic.Switch.Scope == 
ESCopeShared { + return "" + } + return nic.Switch.Name +} + +// Bandwidth 上流ネットワークの帯域幅(単位:Mbps) +// +// -1: 1番目(0番目)のNICが存在しない場合 or 切断されている場合 +// 0 : 制限なしの場合 +// 以外: 帯域幅(Mbps) +func (s *Server) Bandwidth() int { + return s.BandwidthAt(0) +} + +// BandwidthAt 上流ネットワークの帯域幅(単位:Mbps) +// +// -1: 存在しないインデックスを取得した場合 or 切断されている場合 +// 0 : 制限なしの場合 +// 以外: 帯域幅(Mbps) +func (s *Server) BandwidthAt(index int) int { + if len(s.Interfaces) <= index { + return -1 + } + + nic := s.Interfaces[index] + + switch nic.UpstreamType() { + case EUpstreamNetworkNone: + return -1 + case EUpstreamNetworkShared: + return 100 + case EUpstreamNetworkSwitch, EUpstreamNetworkRouter: + // + // 上流ネットワークがスイッチだった場合の帯域制限 + // https://manual.sakura.ad.jp/cloud/support/technical/network.html#support-network-03 + // + + // 専有ホストの場合は制限なし + if s.PrivateHost != nil { + return 0 + } + + // メモリに応じた制限 + memory := s.GetMemoryGB() + switch { + case memory < 32: + return 1000 + case 32 <= memory && memory < 128: + return 2000 + case 128 <= memory && memory < 224: + return 5000 + case 224 <= memory: + return 10000 + default: + return -1 + } + default: + return -1 + } +} + const ( // ServerMaxInterfaceLen サーバーに接続できるNICの最大数 ServerMaxInterfaceLen = 10 diff --git a/vendor/github.com/sacloud/libsacloud/sacloud/sim.go b/vendor/github.com/sacloud/libsacloud/sacloud/sim.go index e513a1e8a..a663b1509 100644 --- a/vendor/github.com/sacloud/libsacloud/sacloud/sim.go +++ b/vendor/github.com/sacloud/libsacloud/sacloud/sim.go @@ -2,10 +2,20 @@ package sacloud import ( "encoding/json" + "strconv" "strings" "time" ) +const ( + // SIMOperatorsKDDI KDDI + SIMOperatorsKDDI = "KDDI" + // SIMOperatorsDOCOMO Docomo + SIMOperatorsDOCOMO = "NTT DOCOMO" + // SIMOperatorsSoftBank SoftBank + SIMOperatorsSoftBank = "SoftBank" +) + // SIM SIM(CommonServiceItem) type SIM struct { *Resource // ID @@ -49,8 +59,8 @@ type SIMInfo struct { // SIMTrafficBytes 当月通信量 type SIMTrafficBytes struct { - UplinkBytes int64 `json:"uplink_bytes,omitempty"` - 
DownlinkBytes int64 `json:"downlink_bytes,omitempty"` + UplinkBytes uint64 `json:"uplink_bytes,omitempty"` + DownlinkBytes uint64 `json:"downlink_bytes,omitempty"` } // UnmarshalJSON JSONアンマーシャル(配列、オブジェクトが混在するためここで対応) @@ -60,15 +70,22 @@ func (s *SIMTrafficBytes) UnmarshalJSON(data []byte) error { return nil } tmp := &struct { - UplinkBytes int64 `json:"uplink_bytes,omitempty"` - DownlinkBytes int64 `json:"downlink_bytes,omitempty"` + UplinkBytes string `json:"uplink_bytes,omitempty"` + DownlinkBytes string `json:"downlink_bytes,omitempty"` }{} if err := json.Unmarshal(data, &tmp); err != nil { return err } - s.UplinkBytes = tmp.UplinkBytes - s.DownlinkBytes = tmp.DownlinkBytes + var err error + s.UplinkBytes, err = strconv.ParseUint(tmp.UplinkBytes, 10, 64) + if err != nil { + return err + } + s.DownlinkBytes, err = strconv.ParseUint(tmp.DownlinkBytes, 10, 64) + if err != nil { + return err + } return nil } @@ -93,6 +110,18 @@ type SIMLog struct { IMSI string `json:"imsi,omitempty"` } +// SIMNetworkOperatorConfig SIM通信キャリア設定 +type SIMNetworkOperatorConfig struct { + Allow bool `json:"allow,omitempty"` + CountryCode string `json:"country_code,omitempty"` + Name string `json:"name,omitempty"` +} + +// SIMNetworkOperatorConfigs SIM通信キャリア設定 リクエストパラメータ +type SIMNetworkOperatorConfigs struct { + NetworkOperatorConfigs []*SIMNetworkOperatorConfig `json:"network_operator_config,omitempty"` +} + // CreateNewSIM SIM作成 func CreateNewSIM(name string, iccID string, passcode string) *SIM { return &SIM{ diff --git a/vendor/github.com/sacloud/libsacloud/sacloud/simple_monitor.go b/vendor/github.com/sacloud/libsacloud/sacloud/simple_monitor.go index 5df470976..e2a0844d7 100644 --- a/vendor/github.com/sacloud/libsacloud/sacloud/simple_monitor.go +++ b/vendor/github.com/sacloud/libsacloud/sacloud/simple_monitor.go @@ -1,5 +1,7 @@ package sacloud +import "time" + // SimpleMonitor シンプル監視 type SimpleMonitor struct { *Resource // ID @@ -47,18 +49,20 @@ type SimpleMonitorProvider struct 
{ // SimpleMonitorHealthCheck ヘルスチェック type SimpleMonitorHealthCheck struct { - Protocol string `json:",omitempty"` // プロトコル - Port string `json:",omitempty"` // ポート - Path string `json:",omitempty"` // HTTP/HTTPS監視の場合のリクエストパス - Status string `json:",omitempty"` // HTTP/HTTPS監視の場合の期待ステータスコード - SNI string `json:",omitempty"` // HTTPS監視時のSNI有効/無効 - Host string `json:",omitempty"` // 対象ホスト(IP or FQDN) - QName string `json:",omitempty"` // DNS監視の場合の問い合わせFQDN - ExpectedData string `json:",omitempty"` // 期待値 - Community string `json:",omitempty"` // SNMP監視の場合のコミュニティ名 - SNMPVersion string `json:",omitempty"` // SNMP監視 SNMPバージョン - OID string `json:",omitempty"` // SNMP監視 OID - RemainingDays int `json:",omitempty"` // SSL証明書 有効残日数 + Protocol string `json:",omitempty"` // プロトコル + Port string `json:",omitempty"` // ポート + Path string `json:",omitempty"` // HTTP/HTTPS監視の場合のリクエストパス + Status string `json:",omitempty"` // HTTP/HTTPS監視の場合の期待ステータスコード + SNI string `json:",omitempty"` // HTTPS監視時のSNI有効/無効 + Host string `json:",omitempty"` // 対象ホスト(IP or FQDN) + BasicAuthUsername string `json:",omitempty"` // HTTP/HTTPS監視の場合のBASIC認証 ユーザー名 + BasicAuthPassword string `json:",omitempty"` // HTTP/HTTPS監視の場合のBASIC認証 パスワード + QName string `json:",omitempty"` // DNS監視の場合の問い合わせFQDN + ExpectedData string `json:",omitempty"` // 期待値 + Community string `json:",omitempty"` // SNMP監視の場合のコミュニティ名 + SNMPVersion string `json:",omitempty"` // SNMP監視 SNMPバージョン + OID string `json:",omitempty"` // SNMP監視 OID + RemainingDays int `json:",omitempty"` // SSL証明書 有効残日数 } // SimpleMonitorNotify シンプル監視通知 @@ -68,6 +72,33 @@ type SimpleMonitorNotify struct { IncomingWebhooksURL string `json:",omitempty"` // Slack通知の場合のWebhook URL } +// ESimpleMonitorHealth シンプル監視ステータス +type ESimpleMonitorHealth string + +var ( + // EHealthUp Up + EHealthUp = ESimpleMonitorHealth("UP") + // EHealthDown Down + EHealthDown = ESimpleMonitorHealth("DOWN") +) + +// IsUp アップ +func (e ESimpleMonitorHealth) IsUp() bool { + return e == EHealthUp 
+} + +// IsDown ダウン +func (e ESimpleMonitorHealth) IsDown() bool { + return e == EHealthDown +} + +// SimpleMonitorHealthCheckStatus シンプル監視ステータス +type SimpleMonitorHealthCheckStatus struct { + LastCheckedAt time.Time + LastHealthChangedAt time.Time + Health ESimpleMonitorHealth +} + // CreateNewSimpleMonitor シンプル監視作成 func CreateNewSimpleMonitor(target string) *SimpleMonitor { return &SimpleMonitor{ @@ -142,29 +173,33 @@ func (s *SimpleMonitor) SetHealthCheckTCP(port string) { } // SetHealthCheckHTTP HTTPでのヘルスチェック設定 -func (s *SimpleMonitor) SetHealthCheckHTTP(port string, path string, status string, host string) { +func (s *SimpleMonitor) SetHealthCheckHTTP(port string, path string, status string, host string, user, pass string) { s.Settings.SimpleMonitor.HealthCheck = &SimpleMonitorHealthCheck{ - Protocol: "http", - Port: port, - Path: path, - Status: status, - Host: host, + Protocol: "http", + Port: port, + Path: path, + Status: status, + Host: host, + BasicAuthUsername: user, + BasicAuthPassword: pass, } } // SetHealthCheckHTTPS HTTPSでのヘルスチェック設定 -func (s *SimpleMonitor) SetHealthCheckHTTPS(port string, path string, status string, host string, sni bool) { +func (s *SimpleMonitor) SetHealthCheckHTTPS(port string, path string, status string, host string, sni bool, user, pass string) { strSNI := "False" if sni { strSNI = "True" } s.Settings.SimpleMonitor.HealthCheck = &SimpleMonitorHealthCheck{ - Protocol: "https", - Port: port, - Path: path, - Status: status, - Host: host, - SNI: strSNI, + Protocol: "https", + Port: port, + Path: path, + Status: status, + Host: host, + SNI: strSNI, + BasicAuthUsername: user, + BasicAuthPassword: pass, } } diff --git a/vendor/github.com/sacloud/libsacloud/sacloud/switch.go b/vendor/github.com/sacloud/libsacloud/sacloud/switch.go index cb90bfb1b..75013c12c 100644 --- a/vendor/github.com/sacloud/libsacloud/sacloud/switch.go +++ b/vendor/github.com/sacloud/libsacloud/sacloud/switch.go @@ -15,6 +15,7 @@ type Switch struct { propIcon // 
アイコン propTags // タグ propCreatedAt // 作成日時 + propZone // ゾーン ServerCount int `json:",omitempty"` // 接続サーバー数 ApplianceCount int `json:",omitempty"` // 接続アプライアンス数 diff --git a/vendor/github.com/sacloud/libsacloud/sacloud/vpc_router.go b/vendor/github.com/sacloud/libsacloud/sacloud/vpc_router.go index 20bee798c..ed394e8b6 100644 --- a/vendor/github.com/sacloud/libsacloud/sacloud/vpc_router.go +++ b/vendor/github.com/sacloud/libsacloud/sacloud/vpc_router.go @@ -236,3 +236,134 @@ func (v *VPCRouter) FindBelongsInterface(ip net.IP) (int, *VPCRouterInterface) { } return -1, nil } + +// IPAddress1 1番目(0番目)のNICのIPアドレス1 +func (v *VPCRouter) IPAddress1() string { + return v.IPAddress1At(0) +} + +// IPAddress1At 指定インデックスのNICのIPアドレス1 +func (v *VPCRouter) IPAddress1At(index int) string { + if len(v.Interfaces) <= index { + return "" + } + + if index == 0 { + if v.IsStandardPlan() { + return v.Interfaces[0].IPAddress + } + + if !v.HasInterfaces() { + return "" + } + if len(v.Settings.Router.Interfaces[0].IPAddress) < 1 { + return "" + } + return v.Settings.Router.Interfaces[0].IPAddress[0] + } + + nic := v.Settings.Router.Interfaces[index] + if len(nic.IPAddress) < 1 { + return "" + } + return nic.IPAddress[0] +} + +// IPAddress2 1番目(0番目)のNICのIPアドレス2 +func (v *VPCRouter) IPAddress2() string { + return v.IPAddress2At(0) +} + +// IPAddress2At 指定インデックスのNICのIPアドレス2 +func (v *VPCRouter) IPAddress2At(index int) string { + if v.IsStandardPlan() { + return "" + } + if len(v.Interfaces) <= index { + return "" + } + + if index == 0 { + if !v.HasInterfaces() { + return "" + } + if len(v.Settings.Router.Interfaces[0].IPAddress) < 2 { + return "" + } + return v.Settings.Router.Interfaces[0].IPAddress[1] + } + + nic := v.Settings.Router.Interfaces[index] + if len(nic.IPAddress) < 2 { + return "" + } + return nic.IPAddress[1] +} + +// VirtualIPAddress 1番目(0番目)のNICのVIP +func (v *VPCRouter) VirtualIPAddress() string { + return v.VirtualIPAddressAt(0) +} + +// VirtualIPAddressAt 指定インデックスのNICのVIP 
+func (v *VPCRouter) VirtualIPAddressAt(index int) string { + if v.IsStandardPlan() { + return "" + } + if len(v.Interfaces) <= index { + return "" + } + + return v.Settings.Router.Interfaces[0].VirtualIPAddress +} + +// NetworkMaskLen 1番目(0番目)のNICのネットワークマスク長 +func (v *VPCRouter) NetworkMaskLen() int { + return v.NetworkMaskLenAt(0) +} + +// NetworkMaskLenAt 指定インデックスのNICのネットワークマスク長 +func (v *VPCRouter) NetworkMaskLenAt(index int) int { + if !v.HasInterfaces() { + return -1 + } + if len(v.Interfaces) <= index { + return -1 + } + + if index == 0 { + return v.Interfaces[0].Switch.Subnet.NetworkMaskLen + } + + return v.Settings.Router.Interfaces[index].NetworkMaskLen +} + +// Zone スイッチから現在のゾーン名を取得 +// +// Note: 共有セグメント接続時は取得不能 +func (v *VPCRouter) Zone() string { + if v.Switch != nil { + return v.Switch.GetZoneName() + } + + if len(v.Interfaces) > 0 && v.Interfaces[0].Switch != nil { + return v.Interfaces[0].Switch.GetZoneName() + } + + return "" +} + +// VRID VRIDを取得 +// +// スタンダードプラン、またはVRIDの参照に失敗した場合は-1を返す +func (v *VPCRouter) VRID() int { + if v.IsStandardPlan() { + return -1 + } + + if !v.HasSetting() || v.Settings.Router.VRID == nil { + return -1 + } + + return *v.Settings.Router.VRID +} diff --git a/vendor/github.com/sacloud/libsacloud/sacloud/vpc_router_setting.go b/vendor/github.com/sacloud/libsacloud/sacloud/vpc_router_setting.go index a9e85ca57..bea0db324 100644 --- a/vendor/github.com/sacloud/libsacloud/sacloud/vpc_router_setting.go +++ b/vendor/github.com/sacloud/libsacloud/sacloud/vpc_router_setting.go @@ -22,6 +22,7 @@ type VPCRouterSetting struct { RemoteAccessUsers *VPCRouterRemoteAccessUsers `json:",omitempty"` // リモートアクセスユーザー設定 SiteToSiteIPsecVPN *VPCRouterSiteToSiteIPsecVPN `json:",omitempty"` // サイト間VPN設定 StaticRoutes *VPCRouterStaticRoutes `json:",omitempty"` // スタティックルート設定 + InternetConnection *VPCRouterInternetConnection `json:",omitempty"` // インターネット接続 VRID *int `json:",omitempty"` // VRID SyslogHost string `json:",omitempty"` // syslog転送先ホスト 
@@ -1156,3 +1157,22 @@ func (s *VPCRouterSetting) FindStaticRoute(prefix string, nextHop string) (int, } return -1, nil } + +// VPCRouterInternetConnection インターネット接続 +type VPCRouterInternetConnection struct { + Enabled string `json:",omitempty"` // 有効/無効 +} + +// SetInternetConnection インターネット接続 有効/無効 設定 +func (s *VPCRouterSetting) SetInternetConnection(enabled bool) { + if s.InternetConnection == nil { + s.InternetConnection = &VPCRouterInternetConnection{ + Enabled: "True", + } + } + if enabled { + s.InternetConnection.Enabled = "True" + } else { + s.InternetConnection.Enabled = "False" + } +} diff --git a/vendor/github.com/sacloud/libsacloud/sacloud/vpc_router_status.go b/vendor/github.com/sacloud/libsacloud/sacloud/vpc_router_status.go index ffe0ea0ef..70a5f798f 100644 --- a/vendor/github.com/sacloud/libsacloud/sacloud/vpc_router_status.go +++ b/vendor/github.com/sacloud/libsacloud/sacloud/vpc_router_status.go @@ -5,4 +5,23 @@ type VPCRouterStatus struct { FirewallReceiveLogs []string FirewallSendLogs []string VPNLogs []string + SessionCount int + DHCPServerLeases []struct { + IPAddress string + MACAddress string + } + L2TPIPsecServerSessions []struct { + User string + IPAddress string + TimeSec int + } + PPTPServerSessions []struct { + User string + IPAddress string + TimeSec int + } + SiteToSiteIPsecVPNPeers []struct { + Status string + Peer string + } } diff --git a/vendor/github.com/sacloud/libsacloud/utils/mutexkv/mutexkv.go b/vendor/github.com/sacloud/libsacloud/utils/mutexkv/mutexkv.go new file mode 100644 index 000000000..383555888 --- /dev/null +++ b/vendor/github.com/sacloud/libsacloud/utils/mutexkv/mutexkv.go @@ -0,0 +1,43 @@ +package mutexkv + +import ( + "sync" +) + +// MutexKV is a simple key/value store for arbitrary mutexes. It can be used to +// serialize changes across arbitrary collaborators that share knowledge of the +// keys they must serialize on. 
+type MutexKV struct { + lock sync.Mutex + store map[string]*sync.Mutex +} + +// Lock the mutex for the given key. Caller is responsible for calling Unlock +// for the same key +func (m *MutexKV) Lock(key string) { + m.get(key).Lock() +} + +// Unlock the mutex for the given key. Caller must have called Lock for the same key first +func (m *MutexKV) Unlock(key string) { + m.get(key).Unlock() +} + +// Returns a mutex for the given key, no guarantee of its lock status +func (m *MutexKV) get(key string) *sync.Mutex { + m.lock.Lock() + defer m.lock.Unlock() + mutex, ok := m.store[key] + if !ok { + mutex = &sync.Mutex{} + m.store[key] = mutex + } + return mutex +} + +// NewMutexKV Returns a properly initalized MutexKV +func NewMutexKV() *MutexKV { + return &MutexKV{ + store: make(map[string]*sync.Mutex), + } +} diff --git a/vendor/go.opencensus.io/LICENSE b/vendor/go.opencensus.io/LICENSE new file mode 100644 index 000000000..7a4a3ea24 --- /dev/null +++ b/vendor/go.opencensus.io/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/vendor/go.opencensus.io/internal/internal.go b/vendor/go.opencensus.io/internal/internal.go new file mode 100644 index 000000000..9a638781c --- /dev/null +++ b/vendor/go.opencensus.io/internal/internal.go @@ -0,0 +1,37 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal // import "go.opencensus.io/internal" + +import ( + "fmt" + "time" + + opencensus "go.opencensus.io" +) + +// UserAgent is the user agent to be added to the outgoing +// requests from the exporters. +var UserAgent = fmt.Sprintf("opencensus-go/%s", opencensus.Version()) + +// MonotonicEndTime returns the end time at present +// but offset from start, monotonically. +// +// The monotonic clock is used in subtractions hence +// the duration since start added back to start gives +// end as a monotonic time. 
+// See https://golang.org/pkg/time/#hdr-Monotonic_Clocks +func MonotonicEndTime(start time.Time) time.Time { + return start.Add(time.Now().Sub(start)) +} diff --git a/vendor/go.opencensus.io/internal/sanitize.go b/vendor/go.opencensus.io/internal/sanitize.go new file mode 100644 index 000000000..de8ccf236 --- /dev/null +++ b/vendor/go.opencensus.io/internal/sanitize.go @@ -0,0 +1,50 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "strings" + "unicode" +) + +const labelKeySizeLimit = 100 + +// Sanitize returns a string that is trunacated to 100 characters if it's too +// long, and replaces non-alphanumeric characters to underscores. 
+func Sanitize(s string) string { + if len(s) == 0 { + return s + } + if len(s) > labelKeySizeLimit { + s = s[:labelKeySizeLimit] + } + s = strings.Map(sanitizeRune, s) + if unicode.IsDigit(rune(s[0])) { + s = "key_" + s + } + if s[0] == '_' { + s = "key" + s + } + return s +} + +// converts anything that is not a letter or digit to an underscore +func sanitizeRune(r rune) rune { + if unicode.IsLetter(r) || unicode.IsDigit(r) { + return r + } + // Everything else turns into an underscore + return '_' +} diff --git a/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go b/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go new file mode 100644 index 000000000..41b2c3fc0 --- /dev/null +++ b/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go @@ -0,0 +1,75 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package tagencoding contains the tag encoding +// used interally by the stats collector. +package tagencoding // import "go.opencensus.io/internal/tagencoding" + +// Values represent the encoded buffer for the values. +type Values struct { + Buffer []byte + WriteIndex int + ReadIndex int +} + +func (vb *Values) growIfRequired(expected int) { + if len(vb.Buffer)-vb.WriteIndex < expected { + tmp := make([]byte, 2*(len(vb.Buffer)+1)+expected) + copy(tmp, vb.Buffer) + vb.Buffer = tmp + } +} + +// WriteValue is the helper method to encode Values from map[Key][]byte. 
+func (vb *Values) WriteValue(v []byte) { + length := len(v) & 0xff + vb.growIfRequired(1 + length) + + // writing length of v + vb.Buffer[vb.WriteIndex] = byte(length) + vb.WriteIndex++ + + if length == 0 { + // No value was encoded for this key + return + } + + // writing v + copy(vb.Buffer[vb.WriteIndex:], v[:length]) + vb.WriteIndex += length +} + +// ReadValue is the helper method to decode Values to a map[Key][]byte. +func (vb *Values) ReadValue() []byte { + // read length of v + length := int(vb.Buffer[vb.ReadIndex]) + vb.ReadIndex++ + if length == 0 { + // No value was encoded for this key + return nil + } + + // read value of v + v := make([]byte, length) + endIdx := vb.ReadIndex + length + copy(v, vb.Buffer[vb.ReadIndex:endIdx]) + vb.ReadIndex = endIdx + return v +} + +// Bytes returns a reference to already written bytes in the Buffer. +func (vb *Values) Bytes() []byte { + return vb.Buffer[:vb.WriteIndex] +} diff --git a/vendor/go.opencensus.io/internal/traceinternals.go b/vendor/go.opencensus.io/internal/traceinternals.go new file mode 100644 index 000000000..073af7b47 --- /dev/null +++ b/vendor/go.opencensus.io/internal/traceinternals.go @@ -0,0 +1,53 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "time" +) + +// Trace allows internal access to some trace functionality. 
+// TODO(#412): remove this +var Trace interface{} + +// LocalSpanStoreEnabled true if the local span store is enabled. +var LocalSpanStoreEnabled bool + +// BucketConfiguration stores the number of samples to store for span buckets +// for successful and failed spans for a particular span name. +type BucketConfiguration struct { + Name string + MaxRequestsSucceeded int + MaxRequestsErrors int +} + +// PerMethodSummary is a summary of the spans stored for a single span name. +type PerMethodSummary struct { + Active int + LatencyBuckets []LatencyBucketSummary + ErrorBuckets []ErrorBucketSummary +} + +// LatencyBucketSummary is a summary of a latency bucket. +type LatencyBucketSummary struct { + MinLatency, MaxLatency time.Duration + Size int +} + +// ErrorBucketSummary is a summary of an error bucket. +type ErrorBucketSummary struct { + ErrorCode int32 + Size int +} diff --git a/vendor/go.opencensus.io/metric/metricdata/doc.go b/vendor/go.opencensus.io/metric/metricdata/doc.go new file mode 100644 index 000000000..52a7b3bf8 --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricdata/doc.go @@ -0,0 +1,19 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package metricdata contains the metrics data model. +// +// This is an EXPERIMENTAL package, and may change in arbitrary ways without +// notice. 
+package metricdata // import "go.opencensus.io/metric/metricdata" diff --git a/vendor/go.opencensus.io/metric/metricdata/exemplar.go b/vendor/go.opencensus.io/metric/metricdata/exemplar.go new file mode 100644 index 000000000..12695ce2d --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricdata/exemplar.go @@ -0,0 +1,38 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricdata + +import ( + "time" +) + +// Exemplars keys. +const ( + AttachmentKeySpanContext = "SpanContext" +) + +// Exemplar is an example data point associated with each bucket of a +// distribution type aggregation. +// +// Their purpose is to provide an example of the kind of thing +// (request, RPC, trace span, etc.) that resulted in that measurement. +type Exemplar struct { + Value float64 // the value that was recorded + Timestamp time.Time // the time the value was recorded + Attachments Attachments // attachments (if any) +} + +// Attachments is a map of extra values associated with a recorded data point. 
+type Attachments map[string]interface{} diff --git a/vendor/go.opencensus.io/metric/metricdata/label.go b/vendor/go.opencensus.io/metric/metricdata/label.go new file mode 100644 index 000000000..aadae41e6 --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricdata/label.go @@ -0,0 +1,35 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricdata + +// LabelKey represents key of a label. It has optional +// description attribute. +type LabelKey struct { + Key string + Description string +} + +// LabelValue represents the value of a label. +// The zero value represents a missing label value, which may be treated +// differently to an empty string value by some back ends. +type LabelValue struct { + Value string // string value of the label + Present bool // flag that indicated whether a value is present or not +} + +// NewLabelValue creates a new non-nil LabelValue that represents the given string. 
+func NewLabelValue(val string) LabelValue { + return LabelValue{Value: val, Present: true} +} diff --git a/vendor/go.opencensus.io/metric/metricdata/metric.go b/vendor/go.opencensus.io/metric/metricdata/metric.go new file mode 100644 index 000000000..8293712c7 --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricdata/metric.go @@ -0,0 +1,46 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricdata + +import ( + "time" + + "go.opencensus.io/resource" +) + +// Descriptor holds metadata about a metric. +type Descriptor struct { + Name string // full name of the metric + Description string // human-readable description + Unit Unit // units for the measure + Type Type // type of measure + LabelKeys []LabelKey // label keys +} + +// Metric represents a quantity measured against a resource with different +// label value combinations. +type Metric struct { + Descriptor Descriptor // metric descriptor + Resource *resource.Resource // resource against which this was measured + TimeSeries []*TimeSeries // one time series for each combination of label values +} + +// TimeSeries is a sequence of points associated with a combination of label +// values. 
+type TimeSeries struct { + LabelValues []LabelValue // label values, same order as keys in the metric descriptor + Points []Point // points sequence + StartTime time.Time // time we started recording this time series +} diff --git a/vendor/go.opencensus.io/metric/metricdata/point.go b/vendor/go.opencensus.io/metric/metricdata/point.go new file mode 100644 index 000000000..7fe057b19 --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricdata/point.go @@ -0,0 +1,193 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricdata + +import ( + "time" +) + +// Point is a single data point of a time series. +type Point struct { + // Time is the point in time that this point represents in a time series. + Time time.Time + // Value is the value of this point. Prefer using ReadValue to switching on + // the value type, since new value types might be added. + Value interface{} +} + +//go:generate stringer -type ValueType + +// NewFloat64Point creates a new Point holding a float64 value. +func NewFloat64Point(t time.Time, val float64) Point { + return Point{ + Value: val, + Time: t, + } +} + +// NewInt64Point creates a new Point holding an int64 value. +func NewInt64Point(t time.Time, val int64) Point { + return Point{ + Value: val, + Time: t, + } +} + +// NewDistributionPoint creates a new Point holding a Distribution value. 
+func NewDistributionPoint(t time.Time, val *Distribution) Point { + return Point{ + Value: val, + Time: t, + } +} + +// NewSummaryPoint creates a new Point holding a Summary value. +func NewSummaryPoint(t time.Time, val *Summary) Point { + return Point{ + Value: val, + Time: t, + } +} + +// ValueVisitor allows reading the value of a point. +type ValueVisitor interface { + VisitFloat64Value(float64) + VisitInt64Value(int64) + VisitDistributionValue(*Distribution) + VisitSummaryValue(*Summary) +} + +// ReadValue accepts a ValueVisitor and calls the appropriate method with the +// value of this point. +// Consumers of Point should use this in preference to switching on the type +// of the value directly, since new value types may be added. +func (p Point) ReadValue(vv ValueVisitor) { + switch v := p.Value.(type) { + case int64: + vv.VisitInt64Value(v) + case float64: + vv.VisitFloat64Value(v) + case *Distribution: + vv.VisitDistributionValue(v) + case *Summary: + vv.VisitSummaryValue(v) + default: + panic("unexpected value type") + } +} + +// Distribution contains summary statistics for a population of values. It +// optionally contains a histogram representing the distribution of those +// values across a set of buckets. +type Distribution struct { + // Count is the number of values in the population. Must be non-negative. This value + // must equal the sum of the values in bucket_counts if a histogram is + // provided. + Count int64 + // Sum is the sum of the values in the population. If count is zero then this field + // must be zero. + Sum float64 + // SumOfSquaredDeviation is the sum of squared deviations from the mean of the values in the + // population. For values x_i this is: + // + // Sum[i=1..n]((x_i - mean)^2) + // + // Knuth, "The Art of Computer Programming", Vol. 2, page 323, 3rd edition + // describes Welford's method for accumulating this sum in one pass. + // + // If count is zero then this field must be zero. 
+ SumOfSquaredDeviation float64 + // BucketOptions describes the bounds of the histogram buckets in this + // distribution. + // + // A Distribution may optionally contain a histogram of the values in the + // population. + // + // If nil, there is no associated histogram. + BucketOptions *BucketOptions + // Bucket If the distribution does not have a histogram, then omit this field. + // If there is a histogram, then the sum of the values in the Bucket counts + // must equal the value in the count field of the distribution. + Buckets []Bucket +} + +// BucketOptions describes the bounds of the histogram buckets in this +// distribution. +type BucketOptions struct { + // Bounds specifies a set of bucket upper bounds. + // This defines len(bounds) + 1 (= N) buckets. The boundaries for bucket + // index i are: + // + // [0, Bounds[i]) for i == 0 + // [Bounds[i-1], Bounds[i]) for 0 < i < N-1 + // [Bounds[i-1], +infinity) for i == N-1 + Bounds []float64 +} + +// Bucket represents a single bucket (value range) in a distribution. +type Bucket struct { + // Count is the number of values in each bucket of the histogram, as described in + // bucket_bounds. + Count int64 + // Exemplar associated with this bucket (if any). + Exemplar *Exemplar +} + +// Summary is a representation of percentiles. +type Summary struct { + // Count is the cumulative count (if available). + Count int64 + // Sum is the cumulative sum of values (if available). + Sum float64 + // HasCountAndSum is true if Count and Sum are available. + HasCountAndSum bool + // Snapshot represents percentiles calculated over an arbitrary time window. + // The values in this struct can be reset at arbitrary unknown times, with + // the requirement that all of them are reset at the same time. + Snapshot Snapshot +} + +// Snapshot represents percentiles over an arbitrary time. +// The values in this struct can be reset at arbitrary unknown times, with +// the requirement that all of them are reset at the same time. 
+type Snapshot struct { + // Count is the number of values in the snapshot. Optional since some systems don't + // expose this. Set to 0 if not available. + Count int64 + // Sum is the sum of values in the snapshot. Optional since some systems don't + // expose this. If count is 0 then this field must be zero. + Sum float64 + // Percentiles is a map from percentile (range (0-100.0]) to the value of + // the percentile. + Percentiles map[float64]float64 +} + +//go:generate stringer -type Type + +// Type is the overall type of metric, including its value type and whether it +// represents a cumulative total (since the start time) or if it represents a +// gauge value. +type Type int + +// Metric types. +const ( + TypeGaugeInt64 Type = iota + TypeGaugeFloat64 + TypeGaugeDistribution + TypeCumulativeInt64 + TypeCumulativeFloat64 + TypeCumulativeDistribution + TypeSummary +) diff --git a/vendor/go.opencensus.io/metric/metricdata/type_string.go b/vendor/go.opencensus.io/metric/metricdata/type_string.go new file mode 100644 index 000000000..c3f8ec27b --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricdata/type_string.go @@ -0,0 +1,16 @@ +// Code generated by "stringer -type Type"; DO NOT EDIT. 
+ +package metricdata + +import "strconv" + +const _Type_name = "TypeGaugeInt64TypeGaugeFloat64TypeGaugeDistributionTypeCumulativeInt64TypeCumulativeFloat64TypeCumulativeDistributionTypeSummary" + +var _Type_index = [...]uint8{0, 14, 30, 51, 70, 91, 117, 128} + +func (i Type) String() string { + if i < 0 || i >= Type(len(_Type_index)-1) { + return "Type(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Type_name[_Type_index[i]:_Type_index[i+1]] +} diff --git a/vendor/go.opencensus.io/metric/metricdata/unit.go b/vendor/go.opencensus.io/metric/metricdata/unit.go new file mode 100644 index 000000000..b483a1371 --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricdata/unit.go @@ -0,0 +1,27 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricdata + +// Unit is a string encoded according to the case-sensitive abbreviations from the +// Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html +type Unit string + +// Predefined units. To record against a unit not represented here, create your +// own Unit type constant from a string. 
+const ( + UnitDimensionless Unit = "1" + UnitBytes Unit = "By" + UnitMilliseconds Unit = "ms" +) diff --git a/vendor/go.opencensus.io/metric/metricproducer/manager.go b/vendor/go.opencensus.io/metric/metricproducer/manager.go new file mode 100644 index 000000000..ca1f39049 --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricproducer/manager.go @@ -0,0 +1,78 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricproducer + +import ( + "sync" +) + +// Manager maintains a list of active producers. Producers can register +// with the manager to allow readers to read all metrics provided by them. +// Readers can retrieve all producers registered with the manager, +// read metrics from the producers and export them. +type Manager struct { + mu sync.RWMutex + producers map[Producer]struct{} +} + +var prodMgr *Manager +var once sync.Once + +// GlobalManager is a single instance of producer manager +// that is used by all producers and all readers. +func GlobalManager() *Manager { + once.Do(func() { + prodMgr = &Manager{} + prodMgr.producers = make(map[Producer]struct{}) + }) + return prodMgr +} + +// AddProducer adds the producer to the Manager if it is not already present. 
+func (pm *Manager) AddProducer(producer Producer) { + if producer == nil { + return + } + pm.mu.Lock() + defer pm.mu.Unlock() + pm.producers[producer] = struct{}{} +} + +// DeleteProducer deletes the producer from the Manager if it is present. +func (pm *Manager) DeleteProducer(producer Producer) { + if producer == nil { + return + } + pm.mu.Lock() + defer pm.mu.Unlock() + delete(pm.producers, producer) +} + +// GetAll returns a slice of all producer currently registered with +// the Manager. For each call it generates a new slice. The slice +// should not be cached as registration may change at any time. It is +// typically called periodically by exporter to read metrics from +// the producers. +func (pm *Manager) GetAll() []Producer { + pm.mu.Lock() + defer pm.mu.Unlock() + producers := make([]Producer, len(pm.producers)) + i := 0 + for producer := range pm.producers { + producers[i] = producer + i++ + } + return producers +} diff --git a/vendor/go.opencensus.io/metric/metricproducer/producer.go b/vendor/go.opencensus.io/metric/metricproducer/producer.go new file mode 100644 index 000000000..6cee9ed17 --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricproducer/producer.go @@ -0,0 +1,28 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricproducer + +import ( + "go.opencensus.io/metric/metricdata" +) + +// Producer is a source of metrics. 
+type Producer interface { + // Read should return the current values of all metrics supported by this + // metric provider. + // The returned metrics should be unique for each combination of name and + // resource. + Read() []*metricdata.Metric +} diff --git a/vendor/go.opencensus.io/opencensus.go b/vendor/go.opencensus.io/opencensus.go new file mode 100644 index 000000000..d2565f1e2 --- /dev/null +++ b/vendor/go.opencensus.io/opencensus.go @@ -0,0 +1,21 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package opencensus contains Go support for OpenCensus. +package opencensus // import "go.opencensus.io" + +// Version is the current release version of OpenCensus in use. +func Version() string { + return "0.21.0" +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/client.go b/vendor/go.opencensus.io/plugin/ochttp/client.go new file mode 100644 index 000000000..da815b2a7 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/client.go @@ -0,0 +1,117 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "net/http" + "net/http/httptrace" + + "go.opencensus.io/trace" + "go.opencensus.io/trace/propagation" +) + +// Transport is an http.RoundTripper that instruments all outgoing requests with +// OpenCensus stats and tracing. +// +// The zero value is intended to be a useful default, but for +// now it's recommended that you explicitly set Propagation, since the default +// for this may change. +type Transport struct { + // Base may be set to wrap another http.RoundTripper that does the actual + // requests. By default http.DefaultTransport is used. + // + // If base HTTP roundtripper implements CancelRequest, + // the returned round tripper will be cancelable. + Base http.RoundTripper + + // Propagation defines how traces are propagated. If unspecified, a default + // (currently B3 format) will be used. + Propagation propagation.HTTPFormat + + // StartOptions are applied to the span started by this Transport around each + // request. + // + // StartOptions.SpanKind will always be set to trace.SpanKindClient + // for spans started by this transport. + StartOptions trace.StartOptions + + // GetStartOptions allows to set start options per request. If set, + // StartOptions is going to be ignored. + GetStartOptions func(*http.Request) trace.StartOptions + + // NameFromRequest holds the function to use for generating the span name + // from the information found in the outgoing HTTP Request. By default the + // name equals the URL Path. 
+ FormatSpanName func(*http.Request) string + + // NewClientTrace may be set to a function allowing the current *trace.Span + // to be annotated with HTTP request event information emitted by the + // httptrace package. + NewClientTrace func(*http.Request, *trace.Span) *httptrace.ClientTrace + + // TODO: Implement tag propagation for HTTP. +} + +// RoundTrip implements http.RoundTripper, delegating to Base and recording stats and traces for the request. +func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + rt := t.base() + if isHealthEndpoint(req.URL.Path) { + return rt.RoundTrip(req) + } + // TODO: remove excessive nesting of http.RoundTrippers here. + format := t.Propagation + if format == nil { + format = defaultFormat + } + spanNameFormatter := t.FormatSpanName + if spanNameFormatter == nil { + spanNameFormatter = spanNameFromURL + } + + startOpts := t.StartOptions + if t.GetStartOptions != nil { + startOpts = t.GetStartOptions(req) + } + + rt = &traceTransport{ + base: rt, + format: format, + startOptions: trace.StartOptions{ + Sampler: startOpts.Sampler, + SpanKind: trace.SpanKindClient, + }, + formatSpanName: spanNameFormatter, + newClientTrace: t.NewClientTrace, + } + rt = statsTransport{base: rt} + return rt.RoundTrip(req) +} + +func (t *Transport) base() http.RoundTripper { + if t.Base != nil { + return t.Base + } + return http.DefaultTransport +} + +// CancelRequest cancels an in-flight request by closing its connection. 
+func (t *Transport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := t.base().(canceler); ok { + cr.CancelRequest(req) + } +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/client_stats.go b/vendor/go.opencensus.io/plugin/ochttp/client_stats.go new file mode 100644 index 000000000..17142aabe --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/client_stats.go @@ -0,0 +1,143 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "context" + "io" + "net/http" + "strconv" + "sync" + "time" + + "go.opencensus.io/stats" + "go.opencensus.io/tag" +) + +// statsTransport is an http.RoundTripper that collects stats for the outgoing requests. +type statsTransport struct { + base http.RoundTripper +} + +// RoundTrip implements http.RoundTripper, delegating to Base and recording stats for the request. +func (t statsTransport) RoundTrip(req *http.Request) (*http.Response, error) { + ctx, _ := tag.New(req.Context(), + tag.Upsert(KeyClientHost, req.Host), + tag.Upsert(Host, req.Host), + tag.Upsert(KeyClientPath, req.URL.Path), + tag.Upsert(Path, req.URL.Path), + tag.Upsert(KeyClientMethod, req.Method), + tag.Upsert(Method, req.Method)) + req = req.WithContext(ctx) + track := &tracker{ + start: time.Now(), + ctx: ctx, + } + if req.Body == nil { + // TODO: Handle cases where ContentLength is not set. 
+ track.reqSize = -1 + } else if req.ContentLength > 0 { + track.reqSize = req.ContentLength + } + stats.Record(ctx, ClientRequestCount.M(1)) + + // Perform request. + resp, err := t.base.RoundTrip(req) + + if err != nil { + track.statusCode = http.StatusInternalServerError + track.end() + } else { + track.statusCode = resp.StatusCode + if req.Method != "HEAD" { + track.respContentLength = resp.ContentLength + } + if resp.Body == nil { + track.end() + } else { + track.body = resp.Body + resp.Body = wrappedBody(track, resp.Body) + } + } + return resp, err +} + +// CancelRequest cancels an in-flight request by closing its connection. +func (t statsTransport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := t.base.(canceler); ok { + cr.CancelRequest(req) + } +} + +type tracker struct { + ctx context.Context + respSize int64 + respContentLength int64 + reqSize int64 + start time.Time + body io.ReadCloser + statusCode int + endOnce sync.Once +} + +var _ io.ReadCloser = (*tracker)(nil) + +func (t *tracker) end() { + t.endOnce.Do(func() { + latencyMs := float64(time.Since(t.start)) / float64(time.Millisecond) + respSize := t.respSize + if t.respSize == 0 && t.respContentLength > 0 { + respSize = t.respContentLength + } + m := []stats.Measurement{ + ClientSentBytes.M(t.reqSize), + ClientReceivedBytes.M(respSize), + ClientRoundtripLatency.M(latencyMs), + ClientLatency.M(latencyMs), + ClientResponseBytes.M(t.respSize), + } + if t.reqSize >= 0 { + m = append(m, ClientRequestBytes.M(t.reqSize)) + } + + stats.RecordWithTags(t.ctx, []tag.Mutator{ + tag.Upsert(StatusCode, strconv.Itoa(t.statusCode)), + tag.Upsert(KeyClientStatus, strconv.Itoa(t.statusCode)), + }, m...) 
+ }) +} + +func (t *tracker) Read(b []byte) (int, error) { + n, err := t.body.Read(b) + t.respSize += int64(n) + switch err { + case nil: + return n, nil + case io.EOF: + t.end() + } + return n, err +} + +func (t *tracker) Close() error { + // Invoking endSpan on Close will help catch the cases + // in which a read returned a non-nil error, we set the + // span status but didn't end the span. + t.end() + return t.body.Close() +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/doc.go b/vendor/go.opencensus.io/plugin/ochttp/doc.go new file mode 100644 index 000000000..10e626b16 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/doc.go @@ -0,0 +1,19 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package ochttp provides OpenCensus instrumentation for net/http package. +// +// For server instrumentation, see Handler. For client-side instrumentation, +// see Transport. +package ochttp // import "go.opencensus.io/plugin/ochttp" diff --git a/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go b/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go new file mode 100644 index 000000000..2f1c7f006 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go @@ -0,0 +1,123 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package b3 contains a propagation.HTTPFormat implementation +// for B3 propagation. See https://github.com/openzipkin/b3-propagation +// for more details. +package b3 // import "go.opencensus.io/plugin/ochttp/propagation/b3" + +import ( + "encoding/hex" + "net/http" + + "go.opencensus.io/trace" + "go.opencensus.io/trace/propagation" +) + +// B3 headers that OpenCensus understands. +const ( + TraceIDHeader = "X-B3-TraceId" + SpanIDHeader = "X-B3-SpanId" + SampledHeader = "X-B3-Sampled" +) + +// HTTPFormat implements propagation.HTTPFormat to propagate +// traces in HTTP headers in B3 propagation format. +// HTTPFormat skips the X-B3-ParentId and X-B3-Flags headers +// because there are additional fields not represented in the +// OpenCensus span context. Spans created from the incoming +// header will be the direct children of the client-side span. +// Similarly, receiver of the outgoing spans should use client-side +// span created by OpenCensus as the parent. +type HTTPFormat struct{} + +var _ propagation.HTTPFormat = (*HTTPFormat)(nil) + +// SpanContextFromRequest extracts a B3 span context from incoming requests. 
+func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) { + tid, ok := ParseTraceID(req.Header.Get(TraceIDHeader)) + if !ok { + return trace.SpanContext{}, false + } + sid, ok := ParseSpanID(req.Header.Get(SpanIDHeader)) + if !ok { + return trace.SpanContext{}, false + } + sampled, _ := ParseSampled(req.Header.Get(SampledHeader)) + return trace.SpanContext{ + TraceID: tid, + SpanID: sid, + TraceOptions: sampled, + }, true +} + +// ParseTraceID parses the value of the X-B3-TraceId header. +func ParseTraceID(tid string) (trace.TraceID, bool) { + if tid == "" { + return trace.TraceID{}, false + } + b, err := hex.DecodeString(tid) + if err != nil { + return trace.TraceID{}, false + } + var traceID trace.TraceID + if len(b) <= 8 { + // The lower 64-bits. + start := 8 + (8 - len(b)) + copy(traceID[start:], b) + } else { + start := 16 - len(b) + copy(traceID[start:], b) + } + + return traceID, true +} + +// ParseSpanID parses the value of the X-B3-SpanId or X-B3-ParentSpanId headers. +func ParseSpanID(sid string) (spanID trace.SpanID, ok bool) { + if sid == "" { + return trace.SpanID{}, false + } + b, err := hex.DecodeString(sid) + if err != nil { + return trace.SpanID{}, false + } + start := 8 - len(b) + copy(spanID[start:], b) + return spanID, true +} + +// ParseSampled parses the value of the X-B3-Sampled header. +func ParseSampled(sampled string) (trace.TraceOptions, bool) { + switch sampled { + case "true", "1": + return trace.TraceOptions(1), true + default: + return trace.TraceOptions(0), false + } +} + +// SpanContextToRequest modifies the given request to include B3 headers. 
+func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) { + req.Header.Set(TraceIDHeader, hex.EncodeToString(sc.TraceID[:])) + req.Header.Set(SpanIDHeader, hex.EncodeToString(sc.SpanID[:])) + + var sampled string + if sc.IsSampled() { + sampled = "1" + } else { + sampled = "0" + } + req.Header.Set(SampledHeader, sampled) +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/route.go b/vendor/go.opencensus.io/plugin/ochttp/route.go new file mode 100644 index 000000000..5e6a34307 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/route.go @@ -0,0 +1,61 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "context" + "net/http" + + "go.opencensus.io/tag" +) + +// SetRoute sets the http_server_route tag to the given value. +// It's useful when an HTTP framework does not support the http.Handler interface +// and using WithRouteTag is not an option, but provides a way to hook into the request flow. +func SetRoute(ctx context.Context, route string) { + if a, ok := ctx.Value(addedTagsKey{}).(*addedTags); ok { + a.t = append(a.t, tag.Upsert(KeyServerRoute, route)) + } +} + +// WithRouteTag returns an http.Handler that records stats with the +// http_server_route tag set to the given value. 
+func WithRouteTag(handler http.Handler, route string) http.Handler { + return taggedHandlerFunc(func(w http.ResponseWriter, r *http.Request) []tag.Mutator { + addRoute := []tag.Mutator{tag.Upsert(KeyServerRoute, route)} + ctx, _ := tag.New(r.Context(), addRoute...) + r = r.WithContext(ctx) + handler.ServeHTTP(w, r) + return addRoute + }) +} + +// taggedHandlerFunc is a http.Handler that returns tags describing the +// processing of the request. These tags will be recorded along with the +// measures in this package at the end of the request. +type taggedHandlerFunc func(w http.ResponseWriter, r *http.Request) []tag.Mutator + +func (h taggedHandlerFunc) ServeHTTP(w http.ResponseWriter, r *http.Request) { + tags := h(w, r) + if a, ok := r.Context().Value(addedTagsKey{}).(*addedTags); ok { + a.t = append(a.t, tags...) + } +} + +type addedTagsKey struct{} + +type addedTags struct { + t []tag.Mutator +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/server.go b/vendor/go.opencensus.io/plugin/ochttp/server.go new file mode 100644 index 000000000..4f6404fa7 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/server.go @@ -0,0 +1,449 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package ochttp + +import ( + "context" + "io" + "net/http" + "strconv" + "sync" + "time" + + "go.opencensus.io/stats" + "go.opencensus.io/tag" + "go.opencensus.io/trace" + "go.opencensus.io/trace/propagation" +) + +// Handler is an http.Handler wrapper to instrument your HTTP server with +// OpenCensus. It supports both stats and tracing. +// +// Tracing +// +// This handler is aware of the incoming request's span, reading it from request +// headers as configured using the Propagation field. +// The extracted span can be accessed from the incoming request's +// context. +// +// span := trace.FromContext(r.Context()) +// +// The server span will be automatically ended at the end of ServeHTTP. +type Handler struct { + // Propagation defines how traces are propagated. If unspecified, + // B3 propagation will be used. + Propagation propagation.HTTPFormat + + // Handler is the handler used to handle the incoming request. + Handler http.Handler + + // StartOptions are applied to the span started by this Handler around each + // request. + // + // StartOptions.SpanKind will always be set to trace.SpanKindServer + // for spans started by this transport. + StartOptions trace.StartOptions + + // GetStartOptions allows to set start options per request. If set, + // StartOptions is going to be ignored. + GetStartOptions func(*http.Request) trace.StartOptions + + // IsPublicEndpoint should be set to true for publicly accessible HTTP(S) + // servers. If true, any trace metadata set on the incoming request will + // be added as a linked trace instead of being added as a parent of the + // current trace. + IsPublicEndpoint bool + + // FormatSpanName holds the function to use for generating the span name + // from the information found in the incoming HTTP Request. By default the + // name equals the URL Path. 
+ FormatSpanName func(*http.Request) string +} + +func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + var tags addedTags + r, traceEnd := h.startTrace(w, r) + defer traceEnd() + w, statsEnd := h.startStats(w, r) + defer statsEnd(&tags) + handler := h.Handler + if handler == nil { + handler = http.DefaultServeMux + } + r = r.WithContext(context.WithValue(r.Context(), addedTagsKey{}, &tags)) + handler.ServeHTTP(w, r) +} + +func (h *Handler) startTrace(w http.ResponseWriter, r *http.Request) (*http.Request, func()) { + if isHealthEndpoint(r.URL.Path) { + return r, func() {} + } + var name string + if h.FormatSpanName == nil { + name = spanNameFromURL(r) + } else { + name = h.FormatSpanName(r) + } + ctx := r.Context() + + startOpts := h.StartOptions + if h.GetStartOptions != nil { + startOpts = h.GetStartOptions(r) + } + + var span *trace.Span + sc, ok := h.extractSpanContext(r) + if ok && !h.IsPublicEndpoint { + ctx, span = trace.StartSpanWithRemoteParent(ctx, name, sc, + trace.WithSampler(startOpts.Sampler), + trace.WithSpanKind(trace.SpanKindServer)) + } else { + ctx, span = trace.StartSpan(ctx, name, + trace.WithSampler(startOpts.Sampler), + trace.WithSpanKind(trace.SpanKindServer), + ) + if ok { + span.AddLink(trace.Link{ + TraceID: sc.TraceID, + SpanID: sc.SpanID, + Type: trace.LinkTypeParent, + Attributes: nil, + }) + } + } + span.AddAttributes(requestAttrs(r)...) + if r.Body == nil { + // TODO: Handle cases where ContentLength is not set. 
+ } else if r.ContentLength > 0 { + span.AddMessageReceiveEvent(0, /* TODO: messageID */ + int64(r.ContentLength), -1) + } + return r.WithContext(ctx), span.End +} + +func (h *Handler) extractSpanContext(r *http.Request) (trace.SpanContext, bool) { + if h.Propagation == nil { + return defaultFormat.SpanContextFromRequest(r) + } + return h.Propagation.SpanContextFromRequest(r) +} + +func (h *Handler) startStats(w http.ResponseWriter, r *http.Request) (http.ResponseWriter, func(tags *addedTags)) { + ctx, _ := tag.New(r.Context(), + tag.Upsert(Host, r.Host), + tag.Upsert(Path, r.URL.Path), + tag.Upsert(Method, r.Method)) + track := &trackingResponseWriter{ + start: time.Now(), + ctx: ctx, + writer: w, + } + if r.Body == nil { + // TODO: Handle cases where ContentLength is not set. + track.reqSize = -1 + } else if r.ContentLength > 0 { + track.reqSize = r.ContentLength + } + stats.Record(ctx, ServerRequestCount.M(1)) + return track.wrappedResponseWriter(), track.end +} + +type trackingResponseWriter struct { + ctx context.Context + reqSize int64 + respSize int64 + start time.Time + statusCode int + statusLine string + endOnce sync.Once + writer http.ResponseWriter +} + +// Compile time assertion for ResponseWriter interface +var _ http.ResponseWriter = (*trackingResponseWriter)(nil) + +var logTagsErrorOnce sync.Once + +func (t *trackingResponseWriter) end(tags *addedTags) { + t.endOnce.Do(func() { + if t.statusCode == 0 { + t.statusCode = 200 + } + + span := trace.FromContext(t.ctx) + span.SetStatus(TraceStatus(t.statusCode, t.statusLine)) + span.AddAttributes(trace.Int64Attribute(StatusCodeAttribute, int64(t.statusCode))) + + m := []stats.Measurement{ + ServerLatency.M(float64(time.Since(t.start)) / float64(time.Millisecond)), + ServerResponseBytes.M(t.respSize), + } + if t.reqSize >= 0 { + m = append(m, ServerRequestBytes.M(t.reqSize)) + } + allTags := make([]tag.Mutator, len(tags.t)+1) + allTags[0] = tag.Upsert(StatusCode, strconv.Itoa(t.statusCode)) + 
copy(allTags[1:], tags.t) + stats.RecordWithTags(t.ctx, allTags, m...) + }) +} + +func (t *trackingResponseWriter) Header() http.Header { + return t.writer.Header() +} + +func (t *trackingResponseWriter) Write(data []byte) (int, error) { + n, err := t.writer.Write(data) + t.respSize += int64(n) + // Add message event for request bytes sent. + span := trace.FromContext(t.ctx) + span.AddMessageSendEvent(0 /* TODO: messageID */, int64(n), -1) + return n, err +} + +func (t *trackingResponseWriter) WriteHeader(statusCode int) { + t.writer.WriteHeader(statusCode) + t.statusCode = statusCode + t.statusLine = http.StatusText(t.statusCode) +} + +// wrappedResponseWriter returns a wrapped version of the original +// ResponseWriter and only implements the same combination of additional +// interfaces as the original. +// This implementation is based on https://github.com/felixge/httpsnoop. +func (t *trackingResponseWriter) wrappedResponseWriter() http.ResponseWriter { + var ( + hj, i0 = t.writer.(http.Hijacker) + cn, i1 = t.writer.(http.CloseNotifier) + pu, i2 = t.writer.(http.Pusher) + fl, i3 = t.writer.(http.Flusher) + rf, i4 = t.writer.(io.ReaderFrom) + ) + + switch { + case !i0 && !i1 && !i2 && !i3 && !i4: + return struct { + http.ResponseWriter + }{t} + case !i0 && !i1 && !i2 && !i3 && i4: + return struct { + http.ResponseWriter + io.ReaderFrom + }{t, rf} + case !i0 && !i1 && !i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Flusher + }{t, fl} + case !i0 && !i1 && !i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Flusher + io.ReaderFrom + }{t, fl, rf} + case !i0 && !i1 && i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.Pusher + }{t, pu} + case !i0 && !i1 && i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.Pusher + io.ReaderFrom + }{t, pu, rf} + case !i0 && !i1 && i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Pusher + http.Flusher + }{t, pu, fl} + case !i0 && !i1 && i2 && i3 && i4: + return 
struct { + http.ResponseWriter + http.Pusher + http.Flusher + io.ReaderFrom + }{t, pu, fl, rf} + case !i0 && i1 && !i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.CloseNotifier + }{t, cn} + case !i0 && i1 && !i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.CloseNotifier + io.ReaderFrom + }{t, cn, rf} + case !i0 && i1 && !i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Flusher + }{t, cn, fl} + case !i0 && i1 && !i2 && i3 && i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Flusher + io.ReaderFrom + }{t, cn, fl, rf} + case !i0 && i1 && i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Pusher + }{t, cn, pu} + case !i0 && i1 && i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Pusher + io.ReaderFrom + }{t, cn, pu, rf} + case !i0 && i1 && i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Pusher + http.Flusher + }{t, cn, pu, fl} + case !i0 && i1 && i2 && i3 && i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Pusher + http.Flusher + io.ReaderFrom + }{t, cn, pu, fl, rf} + case i0 && !i1 && !i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + }{t, hj} + case i0 && !i1 && !i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + io.ReaderFrom + }{t, hj, rf} + case i0 && !i1 && !i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.Flusher + }{t, hj, fl} + case i0 && !i1 && !i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.Flusher + io.ReaderFrom + }{t, hj, fl, rf} + case i0 && !i1 && i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.Pusher + }{t, hj, pu} + case i0 && !i1 && i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.Pusher + io.ReaderFrom + }{t, hj, pu, rf} + case i0 && !i1 && i2 && i3 && !i4: 
+ return struct { + http.ResponseWriter + http.Hijacker + http.Pusher + http.Flusher + }{t, hj, pu, fl} + case i0 && !i1 && i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.Pusher + http.Flusher + io.ReaderFrom + }{t, hj, pu, fl, rf} + case i0 && i1 && !i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + }{t, hj, cn} + case i0 && i1 && !i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + io.ReaderFrom + }{t, hj, cn, rf} + case i0 && i1 && !i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Flusher + }{t, hj, cn, fl} + case i0 && i1 && !i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Flusher + io.ReaderFrom + }{t, hj, cn, fl, rf} + case i0 && i1 && i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Pusher + }{t, hj, cn, pu} + case i0 && i1 && i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Pusher + io.ReaderFrom + }{t, hj, cn, pu, rf} + case i0 && i1 && i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Pusher + http.Flusher + }{t, hj, cn, pu, fl} + case i0 && i1 && i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Pusher + http.Flusher + io.ReaderFrom + }{t, hj, cn, pu, fl, rf} + default: + return struct { + http.ResponseWriter + }{t} + } +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go b/vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go new file mode 100644 index 000000000..05c6c56cc --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go @@ -0,0 +1,169 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the 
"License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "crypto/tls" + "net/http" + "net/http/httptrace" + "strings" + + "go.opencensus.io/trace" +) + +type spanAnnotator struct { + sp *trace.Span +} + +// TODO: Remove NewSpanAnnotator at the next release. + +// NewSpanAnnotator returns a httptrace.ClientTrace which annotates +// all emitted httptrace events on the provided Span. +// Deprecated: Use NewSpanAnnotatingClientTrace instead +func NewSpanAnnotator(r *http.Request, s *trace.Span) *httptrace.ClientTrace { + return NewSpanAnnotatingClientTrace(r, s) +} + +// NewSpanAnnotatingClientTrace returns a httptrace.ClientTrace which annotates +// all emitted httptrace events on the provided Span. 
+func NewSpanAnnotatingClientTrace(_ *http.Request, s *trace.Span) *httptrace.ClientTrace { + sa := spanAnnotator{sp: s} + + return &httptrace.ClientTrace{ + GetConn: sa.getConn, + GotConn: sa.gotConn, + PutIdleConn: sa.putIdleConn, + GotFirstResponseByte: sa.gotFirstResponseByte, + Got100Continue: sa.got100Continue, + DNSStart: sa.dnsStart, + DNSDone: sa.dnsDone, + ConnectStart: sa.connectStart, + ConnectDone: sa.connectDone, + TLSHandshakeStart: sa.tlsHandshakeStart, + TLSHandshakeDone: sa.tlsHandshakeDone, + WroteHeaders: sa.wroteHeaders, + Wait100Continue: sa.wait100Continue, + WroteRequest: sa.wroteRequest, + } +} + +func (s spanAnnotator) getConn(hostPort string) { + attrs := []trace.Attribute{ + trace.StringAttribute("httptrace.get_connection.host_port", hostPort), + } + s.sp.Annotate(attrs, "GetConn") +} + +func (s spanAnnotator) gotConn(info httptrace.GotConnInfo) { + attrs := []trace.Attribute{ + trace.BoolAttribute("httptrace.got_connection.reused", info.Reused), + trace.BoolAttribute("httptrace.got_connection.was_idle", info.WasIdle), + } + if info.WasIdle { + attrs = append(attrs, + trace.StringAttribute("httptrace.got_connection.idle_time", info.IdleTime.String())) + } + s.sp.Annotate(attrs, "GotConn") +} + +// PutIdleConn implements a httptrace.ClientTrace hook +func (s spanAnnotator) putIdleConn(err error) { + var attrs []trace.Attribute + if err != nil { + attrs = append(attrs, + trace.StringAttribute("httptrace.put_idle_connection.error", err.Error())) + } + s.sp.Annotate(attrs, "PutIdleConn") +} + +func (s spanAnnotator) gotFirstResponseByte() { + s.sp.Annotate(nil, "GotFirstResponseByte") +} + +func (s spanAnnotator) got100Continue() { + s.sp.Annotate(nil, "Got100Continue") +} + +func (s spanAnnotator) dnsStart(info httptrace.DNSStartInfo) { + attrs := []trace.Attribute{ + trace.StringAttribute("httptrace.dns_start.host", info.Host), + } + s.sp.Annotate(attrs, "DNSStart") +} + +func (s spanAnnotator) dnsDone(info httptrace.DNSDoneInfo) { + var 
addrs []string + for _, addr := range info.Addrs { + addrs = append(addrs, addr.String()) + } + attrs := []trace.Attribute{ + trace.StringAttribute("httptrace.dns_done.addrs", strings.Join(addrs, " , ")), + } + if info.Err != nil { + attrs = append(attrs, + trace.StringAttribute("httptrace.dns_done.error", info.Err.Error())) + } + s.sp.Annotate(attrs, "DNSDone") +} + +func (s spanAnnotator) connectStart(network, addr string) { + attrs := []trace.Attribute{ + trace.StringAttribute("httptrace.connect_start.network", network), + trace.StringAttribute("httptrace.connect_start.addr", addr), + } + s.sp.Annotate(attrs, "ConnectStart") +} + +func (s spanAnnotator) connectDone(network, addr string, err error) { + attrs := []trace.Attribute{ + trace.StringAttribute("httptrace.connect_done.network", network), + trace.StringAttribute("httptrace.connect_done.addr", addr), + } + if err != nil { + attrs = append(attrs, + trace.StringAttribute("httptrace.connect_done.error", err.Error())) + } + s.sp.Annotate(attrs, "ConnectDone") +} + +func (s spanAnnotator) tlsHandshakeStart() { + s.sp.Annotate(nil, "TLSHandshakeStart") +} + +func (s spanAnnotator) tlsHandshakeDone(_ tls.ConnectionState, err error) { + var attrs []trace.Attribute + if err != nil { + attrs = append(attrs, + trace.StringAttribute("httptrace.tls_handshake_done.error", err.Error())) + } + s.sp.Annotate(attrs, "TLSHandshakeDone") +} + +func (s spanAnnotator) wroteHeaders() { + s.sp.Annotate(nil, "WroteHeaders") +} + +func (s spanAnnotator) wait100Continue() { + s.sp.Annotate(nil, "Wait100Continue") +} + +func (s spanAnnotator) wroteRequest(info httptrace.WroteRequestInfo) { + var attrs []trace.Attribute + if info.Err != nil { + attrs = append(attrs, + trace.StringAttribute("httptrace.wrote_request.error", info.Err.Error())) + } + s.sp.Annotate(attrs, "WroteRequest") +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/stats.go b/vendor/go.opencensus.io/plugin/ochttp/stats.go new file mode 100644 index 
000000000..63bbcda5e --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/stats.go @@ -0,0 +1,292 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" +) + +// Deprecated: client HTTP measures. +var ( + // Deprecated: Use a Count aggregation over one of the other client measures to achieve the same effect. + ClientRequestCount = stats.Int64( + "opencensus.io/http/client/request_count", + "Number of HTTP requests started", + stats.UnitDimensionless) + // Deprecated: Use ClientSentBytes. + ClientRequestBytes = stats.Int64( + "opencensus.io/http/client/request_bytes", + "HTTP request body size if set as ContentLength (uncompressed)", + stats.UnitBytes) + // Deprecated: Use ClientReceivedBytes. + ClientResponseBytes = stats.Int64( + "opencensus.io/http/client/response_bytes", + "HTTP response body size (uncompressed)", + stats.UnitBytes) + // Deprecated: Use ClientRoundtripLatency. + ClientLatency = stats.Float64( + "opencensus.io/http/client/latency", + "End-to-end latency", + stats.UnitMilliseconds) +) + +// The following client HTTP measures are supported for use in custom views. 
+var ( + ClientSentBytes = stats.Int64( + "opencensus.io/http/client/sent_bytes", + "Total bytes sent in request body (not including headers)", + stats.UnitBytes, + ) + ClientReceivedBytes = stats.Int64( + "opencensus.io/http/client/received_bytes", + "Total bytes received in response bodies (not including headers but including error responses with bodies)", + stats.UnitBytes, + ) + ClientRoundtripLatency = stats.Float64( + "opencensus.io/http/client/roundtrip_latency", + "Time between first byte of request headers sent to last byte of response received, or terminal error", + stats.UnitMilliseconds, + ) +) + +// The following server HTTP measures are supported for use in custom views: +var ( + ServerRequestCount = stats.Int64( + "opencensus.io/http/server/request_count", + "Number of HTTP requests started", + stats.UnitDimensionless) + ServerRequestBytes = stats.Int64( + "opencensus.io/http/server/request_bytes", + "HTTP request body size if set as ContentLength (uncompressed)", + stats.UnitBytes) + ServerResponseBytes = stats.Int64( + "opencensus.io/http/server/response_bytes", + "HTTP response body size (uncompressed)", + stats.UnitBytes) + ServerLatency = stats.Float64( + "opencensus.io/http/server/latency", + "End-to-end latency", + stats.UnitMilliseconds) +) + +// The following tags are applied to stats recorded by this package. Host, Path +// and Method are applied to all measures. StatusCode is not applied to +// ClientRequestCount or ServerRequestCount, since it is recorded before the status is known. +var ( + // Host is the value of the HTTP Host header. + // + // The value of this tag can be controlled by the HTTP client, so you need + // to watch out for potentially generating high-cardinality labels in your + // metrics backend if you use this tag in views. + Host, _ = tag.NewKey("http.host") + + // StatusCode is the numeric HTTP response status code, + // or "error" if a transport error occurred and no status code was read. 
+ StatusCode, _ = tag.NewKey("http.status") + + // Path is the URL path (not including query string) in the request. + // + // The value of this tag can be controlled by the HTTP client, so you need + // to watch out for potentially generating high-cardinality labels in your + // metrics backend if you use this tag in views. + Path, _ = tag.NewKey("http.path") + + // Method is the HTTP method of the request, capitalized (GET, POST, etc.). + Method, _ = tag.NewKey("http.method") + + // KeyServerRoute is a low cardinality string representing the logical + // handler of the request. This is usually the pattern registered on the a + // ServeMux (or similar string). + KeyServerRoute, _ = tag.NewKey("http_server_route") +) + +// Client tag keys. +var ( + // KeyClientMethod is the HTTP method, capitalized (i.e. GET, POST, PUT, DELETE, etc.). + KeyClientMethod, _ = tag.NewKey("http_client_method") + // KeyClientPath is the URL path (not including query string). + KeyClientPath, _ = tag.NewKey("http_client_path") + // KeyClientStatus is the HTTP status code as an integer (e.g. 200, 404, 500.), or "error" if no response status line was received. + KeyClientStatus, _ = tag.NewKey("http_client_status") + // KeyClientHost is the value of the request Host header. + KeyClientHost, _ = tag.NewKey("http_client_host") +) + +// Default distributions used by views in this package. +var ( + DefaultSizeDistribution = view.Distribution(1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296) + DefaultLatencyDistribution = view.Distribution(1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000) +) + +// Package ochttp provides some convenience views for client measures. +// You still need to register these views for data to actually be collected. 
+var ( + ClientSentBytesDistribution = &view.View{ + Name: "opencensus.io/http/client/sent_bytes", + Measure: ClientSentBytes, + Aggregation: DefaultSizeDistribution, + Description: "Total bytes sent in request body (not including headers), by HTTP method and response status", + TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, + } + + ClientReceivedBytesDistribution = &view.View{ + Name: "opencensus.io/http/client/received_bytes", + Measure: ClientReceivedBytes, + Aggregation: DefaultSizeDistribution, + Description: "Total bytes received in response bodies (not including headers but including error responses with bodies), by HTTP method and response status", + TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, + } + + ClientRoundtripLatencyDistribution = &view.View{ + Name: "opencensus.io/http/client/roundtrip_latency", + Measure: ClientRoundtripLatency, + Aggregation: DefaultLatencyDistribution, + Description: "End-to-end latency, by HTTP method and response status", + TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, + } + + ClientCompletedCount = &view.View{ + Name: "opencensus.io/http/client/completed_count", + Measure: ClientRoundtripLatency, + Aggregation: view.Count(), + Description: "Count of completed requests, by HTTP method and response status", + TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, + } +) + +// Deprecated: Old client Views. +var ( + // Deprecated: No direct replacement, but see ClientCompletedCount. + ClientRequestCountView = &view.View{ + Name: "opencensus.io/http/client/request_count", + Description: "Count of HTTP requests started", + Measure: ClientRequestCount, + Aggregation: view.Count(), + } + + // Deprecated: Use ClientSentBytesDistribution. 
+ ClientRequestBytesView = &view.View{ + Name: "opencensus.io/http/client/request_bytes", + Description: "Size distribution of HTTP request body", + Measure: ClientSentBytes, + Aggregation: DefaultSizeDistribution, + } + + // Deprecated: Use ClientReceivedBytesDistribution instead. + ClientResponseBytesView = &view.View{ + Name: "opencensus.io/http/client/response_bytes", + Description: "Size distribution of HTTP response body", + Measure: ClientReceivedBytes, + Aggregation: DefaultSizeDistribution, + } + + // Deprecated: Use ClientRoundtripLatencyDistribution instead. + ClientLatencyView = &view.View{ + Name: "opencensus.io/http/client/latency", + Description: "Latency distribution of HTTP requests", + Measure: ClientRoundtripLatency, + Aggregation: DefaultLatencyDistribution, + } + + // Deprecated: Use ClientCompletedCount instead. + ClientRequestCountByMethod = &view.View{ + Name: "opencensus.io/http/client/request_count_by_method", + Description: "Client request count by HTTP method", + TagKeys: []tag.Key{Method}, + Measure: ClientSentBytes, + Aggregation: view.Count(), + } + + // Deprecated: Use ClientCompletedCount instead. + ClientResponseCountByStatusCode = &view.View{ + Name: "opencensus.io/http/client/response_count_by_status_code", + Description: "Client response count by status code", + TagKeys: []tag.Key{StatusCode}, + Measure: ClientRoundtripLatency, + Aggregation: view.Count(), + } +) + +// Package ochttp provides some convenience views for server measures. +// You still need to register these views for data to actually be collected. 
+var ( + ServerRequestCountView = &view.View{ + Name: "opencensus.io/http/server/request_count", + Description: "Count of HTTP requests started", + Measure: ServerRequestCount, + Aggregation: view.Count(), + } + + ServerRequestBytesView = &view.View{ + Name: "opencensus.io/http/server/request_bytes", + Description: "Size distribution of HTTP request body", + Measure: ServerRequestBytes, + Aggregation: DefaultSizeDistribution, + } + + ServerResponseBytesView = &view.View{ + Name: "opencensus.io/http/server/response_bytes", + Description: "Size distribution of HTTP response body", + Measure: ServerResponseBytes, + Aggregation: DefaultSizeDistribution, + } + + ServerLatencyView = &view.View{ + Name: "opencensus.io/http/server/latency", + Description: "Latency distribution of HTTP requests", + Measure: ServerLatency, + Aggregation: DefaultLatencyDistribution, + } + + ServerRequestCountByMethod = &view.View{ + Name: "opencensus.io/http/server/request_count_by_method", + Description: "Server request count by HTTP method", + TagKeys: []tag.Key{Method}, + Measure: ServerRequestCount, + Aggregation: view.Count(), + } + + ServerResponseCountByStatusCode = &view.View{ + Name: "opencensus.io/http/server/response_count_by_status_code", + Description: "Server response count by status code", + TagKeys: []tag.Key{StatusCode}, + Measure: ServerLatency, + Aggregation: view.Count(), + } +) + +// DefaultClientViews are the default client views provided by this package. +// Deprecated: No replacement. Register the views you would like individually. +var DefaultClientViews = []*view.View{ + ClientRequestCountView, + ClientRequestBytesView, + ClientResponseBytesView, + ClientLatencyView, + ClientRequestCountByMethod, + ClientResponseCountByStatusCode, +} + +// DefaultServerViews are the default server views provided by this package. +// Deprecated: No replacement. Register the views you would like individually. 
+var DefaultServerViews = []*view.View{ + ServerRequestCountView, + ServerRequestBytesView, + ServerResponseBytesView, + ServerLatencyView, + ServerRequestCountByMethod, + ServerResponseCountByStatusCode, +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/trace.go b/vendor/go.opencensus.io/plugin/ochttp/trace.go new file mode 100644 index 000000000..c23b97fb1 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/trace.go @@ -0,0 +1,239 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "io" + "net/http" + "net/http/httptrace" + + "go.opencensus.io/plugin/ochttp/propagation/b3" + "go.opencensus.io/trace" + "go.opencensus.io/trace/propagation" +) + +// TODO(jbd): Add godoc examples. + +var defaultFormat propagation.HTTPFormat = &b3.HTTPFormat{} + +// Attributes recorded on the span for the requests. +// Only trace exporters will need them. +const ( + HostAttribute = "http.host" + MethodAttribute = "http.method" + PathAttribute = "http.path" + URLAttribute = "http.url" + UserAgentAttribute = "http.user_agent" + StatusCodeAttribute = "http.status_code" +) + +type traceTransport struct { + base http.RoundTripper + startOptions trace.StartOptions + format propagation.HTTPFormat + formatSpanName func(*http.Request) string + newClientTrace func(*http.Request, *trace.Span) *httptrace.ClientTrace +} + +// TODO(jbd): Add message events for request and response size. 
+ +// RoundTrip creates a trace.Span and inserts it into the outgoing request's headers. +// The created span can follow a parent span, if a parent is presented in +// the request's context. +func (t *traceTransport) RoundTrip(req *http.Request) (*http.Response, error) { + name := t.formatSpanName(req) + // TODO(jbd): Discuss whether we want to prefix + // outgoing requests with Sent. + ctx, span := trace.StartSpan(req.Context(), name, + trace.WithSampler(t.startOptions.Sampler), + trace.WithSpanKind(trace.SpanKindClient)) + + if t.newClientTrace != nil { + req = req.WithContext(httptrace.WithClientTrace(ctx, t.newClientTrace(req, span))) + } else { + req = req.WithContext(ctx) + } + + if t.format != nil { + // SpanContextToRequest will modify its Request argument, which is + // contrary to the contract for http.RoundTripper, so we need to + // pass it a copy of the Request. + // However, the Request struct itself was already copied by + // the WithContext calls above and so we just need to copy the header. + header := make(http.Header) + for k, v := range req.Header { + header[k] = v + } + req.Header = header + t.format.SpanContextToRequest(span.SpanContext(), req) + } + + span.AddAttributes(requestAttrs(req)...) + resp, err := t.base.RoundTrip(req) + if err != nil { + span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()}) + span.End() + return resp, err + } + + span.AddAttributes(responseAttrs(resp)...) + span.SetStatus(TraceStatus(resp.StatusCode, resp.Status)) + + // span.End() will be invoked after + // a read from resp.Body returns io.EOF or when + // resp.Body.Close() is invoked. + bt := &bodyTracker{rc: resp.Body, span: span} + resp.Body = wrappedBody(bt, resp.Body) + return resp, err +} + +// bodyTracker wraps a response.Body and invokes +// trace.EndSpan on encountering io.EOF on reading +// the body of the original response. 
+type bodyTracker struct { + rc io.ReadCloser + span *trace.Span +} + +var _ io.ReadCloser = (*bodyTracker)(nil) + +func (bt *bodyTracker) Read(b []byte) (int, error) { + n, err := bt.rc.Read(b) + + switch err { + case nil: + return n, nil + case io.EOF: + bt.span.End() + default: + // For all other errors, set the span status + bt.span.SetStatus(trace.Status{ + // Code 2 is the error code for Internal server error. + Code: 2, + Message: err.Error(), + }) + } + return n, err +} + +func (bt *bodyTracker) Close() error { + // Invoking endSpan on Close will help catch the cases + // in which a read returned a non-nil error, we set the + // span status but didn't end the span. + bt.span.End() + return bt.rc.Close() +} + +// CancelRequest cancels an in-flight request by closing its connection. +func (t *traceTransport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := t.base.(canceler); ok { + cr.CancelRequest(req) + } +} + +func spanNameFromURL(req *http.Request) string { + return req.URL.Path +} + +func requestAttrs(r *http.Request) []trace.Attribute { + userAgent := r.UserAgent() + + attrs := make([]trace.Attribute, 0, 5) + attrs = append(attrs, + trace.StringAttribute(PathAttribute, r.URL.Path), + trace.StringAttribute(URLAttribute, r.URL.String()), + trace.StringAttribute(HostAttribute, r.Host), + trace.StringAttribute(MethodAttribute, r.Method), + ) + + if userAgent != "" { + attrs = append(attrs, trace.StringAttribute(UserAgentAttribute, userAgent)) + } + + return attrs +} + +func responseAttrs(resp *http.Response) []trace.Attribute { + return []trace.Attribute{ + trace.Int64Attribute(StatusCodeAttribute, int64(resp.StatusCode)), + } +} + +// TraceStatus is a utility to convert the HTTP status code to a trace.Status that +// represents the outcome as closely as possible. 
+func TraceStatus(httpStatusCode int, statusLine string) trace.Status { + var code int32 + if httpStatusCode < 200 || httpStatusCode >= 400 { + code = trace.StatusCodeUnknown + } + switch httpStatusCode { + case 499: + code = trace.StatusCodeCancelled + case http.StatusBadRequest: + code = trace.StatusCodeInvalidArgument + case http.StatusGatewayTimeout: + code = trace.StatusCodeDeadlineExceeded + case http.StatusNotFound: + code = trace.StatusCodeNotFound + case http.StatusForbidden: + code = trace.StatusCodePermissionDenied + case http.StatusUnauthorized: // 401 is actually unauthenticated. + code = trace.StatusCodeUnauthenticated + case http.StatusTooManyRequests: + code = trace.StatusCodeResourceExhausted + case http.StatusNotImplemented: + code = trace.StatusCodeUnimplemented + case http.StatusServiceUnavailable: + code = trace.StatusCodeUnavailable + case http.StatusOK: + code = trace.StatusCodeOK + } + return trace.Status{Code: code, Message: codeToStr[code]} +} + +var codeToStr = map[int32]string{ + trace.StatusCodeOK: `OK`, + trace.StatusCodeCancelled: `CANCELLED`, + trace.StatusCodeUnknown: `UNKNOWN`, + trace.StatusCodeInvalidArgument: `INVALID_ARGUMENT`, + trace.StatusCodeDeadlineExceeded: `DEADLINE_EXCEEDED`, + trace.StatusCodeNotFound: `NOT_FOUND`, + trace.StatusCodeAlreadyExists: `ALREADY_EXISTS`, + trace.StatusCodePermissionDenied: `PERMISSION_DENIED`, + trace.StatusCodeResourceExhausted: `RESOURCE_EXHAUSTED`, + trace.StatusCodeFailedPrecondition: `FAILED_PRECONDITION`, + trace.StatusCodeAborted: `ABORTED`, + trace.StatusCodeOutOfRange: `OUT_OF_RANGE`, + trace.StatusCodeUnimplemented: `UNIMPLEMENTED`, + trace.StatusCodeInternal: `INTERNAL`, + trace.StatusCodeUnavailable: `UNAVAILABLE`, + trace.StatusCodeDataLoss: `DATA_LOSS`, + trace.StatusCodeUnauthenticated: `UNAUTHENTICATED`, +} + +func isHealthEndpoint(path string) bool { + // Health checking is pretty frequent and + // traces collected for health endpoints + // can be extremely noisy and 
expensive. + // Disable canonical health checking endpoints + // like /healthz and /_ah/health for now. + if path == "/healthz" || path == "/_ah/health" { + return true + } + return false +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go b/vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go new file mode 100644 index 000000000..7d75cae2b --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go @@ -0,0 +1,44 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "io" +) + +// wrappedBody returns a wrapped version of the original +// Body and only implements the same combination of additional +// interfaces as the original. +func wrappedBody(wrapper io.ReadCloser, body io.ReadCloser) io.ReadCloser { + var ( + wr, i0 = body.(io.Writer) + ) + switch { + case !i0: + return struct { + io.ReadCloser + }{wrapper} + + case i0: + return struct { + io.ReadCloser + io.Writer + }{wrapper, wr} + default: + return struct { + io.ReadCloser + }{wrapper} + } +} diff --git a/vendor/go.opencensus.io/resource/resource.go b/vendor/go.opencensus.io/resource/resource.go new file mode 100644 index 000000000..b1764e1d3 --- /dev/null +++ b/vendor/go.opencensus.io/resource/resource.go @@ -0,0 +1,164 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package resource provides functionality for resource, which capture +// identifying information about the entities for which signals are exported. +package resource + +import ( + "context" + "fmt" + "os" + "regexp" + "sort" + "strconv" + "strings" +) + +// Environment variables used by FromEnv to decode a resource. +const ( + EnvVarType = "OC_RESOURCE_TYPE" + EnvVarLabels = "OC_RESOURCE_LABELS" +) + +// Resource describes an entity about which identifying information and metadata is exposed. +// For example, a type "k8s.io/container" may hold labels describing the pod name and namespace. +type Resource struct { + Type string + Labels map[string]string +} + +// EncodeLabels encodes a labels map to a string as provided via the OC_RESOURCE_LABELS environment variable. +func EncodeLabels(labels map[string]string) string { + sortedKeys := make([]string, 0, len(labels)) + for k := range labels { + sortedKeys = append(sortedKeys, k) + } + sort.Strings(sortedKeys) + + s := "" + for i, k := range sortedKeys { + if i > 0 { + s += "," + } + s += k + "=" + strconv.Quote(labels[k]) + } + return s +} + +var labelRegex = regexp.MustCompile(`^\s*([[:ascii:]]{1,256}?)=("[[:ascii:]]{0,256}?")\s*,`) + +// DecodeLabels decodes a serialized label map as used in the OC_RESOURCE_LABELS variable. +// A list of labels of the form `="",="",...` is accepted. +// Domain names and paths are accepted as label keys. +// Most users will want to use FromEnv instead. 
+func DecodeLabels(s string) (map[string]string, error) { + m := map[string]string{} + // Ensure a trailing comma, which allows us to keep the regex simpler + s = strings.TrimRight(strings.TrimSpace(s), ",") + "," + + for len(s) > 0 { + match := labelRegex.FindStringSubmatch(s) + if len(match) == 0 { + return nil, fmt.Errorf("invalid label formatting, remainder: %s", s) + } + v := match[2] + if v == "" { + v = match[3] + } else { + var err error + if v, err = strconv.Unquote(v); err != nil { + return nil, fmt.Errorf("invalid label formatting, remainder: %s, err: %s", s, err) + } + } + m[match[1]] = v + + s = s[len(match[0]):] + } + return m, nil +} + +// FromEnv is a detector that loads resource information from the OC_RESOURCE_TYPE +// and OC_RESOURCE_labelS environment variables. +func FromEnv(context.Context) (*Resource, error) { + res := &Resource{ + Type: strings.TrimSpace(os.Getenv(EnvVarType)), + } + labels := strings.TrimSpace(os.Getenv(EnvVarLabels)) + if labels == "" { + return res, nil + } + var err error + if res.Labels, err = DecodeLabels(labels); err != nil { + return nil, err + } + return res, nil +} + +var _ Detector = FromEnv + +// merge resource information from b into a. In case of a collision, a takes precedence. +func merge(a, b *Resource) *Resource { + if a == nil { + return b + } + if b == nil { + return a + } + res := &Resource{ + Type: a.Type, + Labels: map[string]string{}, + } + if res.Type == "" { + res.Type = b.Type + } + for k, v := range b.Labels { + res.Labels[k] = v + } + // Labels from resource a overwrite labels from resource b. + for k, v := range a.Labels { + res.Labels[k] = v + } + return res +} + +// Detector attempts to detect resource information. +// If the detector cannot find resource information, the returned resource is nil but no +// error is returned. +// An error is only returned on unexpected failures. 
+type Detector func(context.Context) (*Resource, error) + +// MultiDetector returns a Detector that calls all input detectors in order and +// merges each result with the previous one. In case a type of label key is already set, +// the first set value is takes precedence. +// It returns on the first error that a sub-detector encounters. +func MultiDetector(detectors ...Detector) Detector { + return func(ctx context.Context) (*Resource, error) { + return detectAll(ctx, detectors...) + } +} + +// detectall calls all input detectors sequentially an merges each result with the previous one. +// It returns on the first error that a sub-detector encounters. +func detectAll(ctx context.Context, detectors ...Detector) (*Resource, error) { + var res *Resource + for _, d := range detectors { + r, err := d(ctx) + if err != nil { + return nil, err + } + res = merge(res, r) + } + return res, nil +} diff --git a/vendor/go.opencensus.io/stats/doc.go b/vendor/go.opencensus.io/stats/doc.go new file mode 100644 index 000000000..00d473ee0 --- /dev/null +++ b/vendor/go.opencensus.io/stats/doc.go @@ -0,0 +1,69 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +/* +Package stats contains support for OpenCensus stats recording. + +OpenCensus allows users to create typed measures, record measurements, +aggregate the collected data, and export the aggregated data. 
+ +Measures + +A measure represents a type of data point to be tracked and recorded. +For example, latency, request Mb/s, and response Mb/s are measures +to collect from a server. + +Measure constructors such as Int64 and Float64 automatically +register the measure by the given name. Each registered measure needs +to be unique by name. Measures also have a description and a unit. + +Libraries can define and export measures. Application authors can then +create views and collect and break down measures by the tags they are +interested in. + +Recording measurements + +Measurement is a data point to be collected for a measure. For example, +for a latency (ms) measure, 100 is a measurement that represents a 100ms +latency event. Measurements are created from measures with +the current context. Tags from the current context are recorded with the +measurements if they are any. + +Recorded measurements are dropped immediately if no views are registered for them. +There is usually no need to conditionally enable and disable +recording to reduce cost. Recording of measurements is cheap. + +Libraries can always record measurements, and applications can later decide +on which measurements they want to collect by registering views. This allows +libraries to turn on the instrumentation by default. + +Exemplars + +For a given recorded measurement, the associated exemplar is a diagnostic map +that gives more information about the measurement. + +When aggregated using a Distribution aggregation, an exemplar is kept for each +bucket in the Distribution. This allows you to easily find an example of a +measurement that fell into each bucket. + +For example, if you also use the OpenCensus trace package and you +record a measurement with a context that contains a sampled trace span, +then the trace span will be added to the exemplar associated with the measurement. 
+ +When exported to a supporting back end, you should be able to easily navigate +to example traces that fell into each bucket in the Distribution. + +*/ +package stats // import "go.opencensus.io/stats" diff --git a/vendor/go.opencensus.io/stats/internal/record.go b/vendor/go.opencensus.io/stats/internal/record.go new file mode 100644 index 000000000..36935e629 --- /dev/null +++ b/vendor/go.opencensus.io/stats/internal/record.go @@ -0,0 +1,25 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "go.opencensus.io/tag" +) + +// DefaultRecorder will be called for each Record call. +var DefaultRecorder func(tags *tag.Map, measurement interface{}, attachments map[string]interface{}) + +// SubscriptionReporter reports when a view subscribed with a measure. +var SubscriptionReporter func(measure string) diff --git a/vendor/go.opencensus.io/stats/measure.go b/vendor/go.opencensus.io/stats/measure.go new file mode 100644 index 000000000..1ffd3cefc --- /dev/null +++ b/vendor/go.opencensus.io/stats/measure.go @@ -0,0 +1,109 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package stats + +import ( + "sync" + "sync/atomic" +) + +// Measure represents a single numeric value to be tracked and recorded. +// For example, latency, request bytes, and response bytes could be measures +// to collect from a server. +// +// Measures by themselves have no outside effects. In order to be exported, +// the measure needs to be used in a View. If no Views are defined over a +// measure, there is very little cost in recording it. +type Measure interface { + // Name returns the name of this measure. + // + // Measure names are globally unique (among all libraries linked into your program). + // We recommend prefixing the measure name with a domain name relevant to your + // project or application. + // + // Measure names are never sent over the wire or exported to backends. + // They are only used to create Views. + Name() string + + // Description returns the human-readable description of this measure. + Description() string + + // Unit returns the units for the values this measure takes on. + // + // Units are encoded according to the case-sensitive abbreviations from the + // Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html + Unit() string +} + +// measureDescriptor is the untyped descriptor associated with each measure. +// Int64Measure and Float64Measure wrap measureDescriptor to provide typed +// recording APIs. +// Two Measures with the same name will have the same measureDescriptor. 
+type measureDescriptor struct { + subs int32 // access atomically + + name string + description string + unit string +} + +func (m *measureDescriptor) subscribe() { + atomic.StoreInt32(&m.subs, 1) +} + +func (m *measureDescriptor) subscribed() bool { + return atomic.LoadInt32(&m.subs) == 1 +} + +var ( + mu sync.RWMutex + measures = make(map[string]*measureDescriptor) +) + +func registerMeasureHandle(name, desc, unit string) *measureDescriptor { + mu.Lock() + defer mu.Unlock() + + if stored, ok := measures[name]; ok { + return stored + } + m := &measureDescriptor{ + name: name, + description: desc, + unit: unit, + } + measures[name] = m + return m +} + +// Measurement is the numeric value measured when recording stats. Each measure +// provides methods to create measurements of their kind. For example, Int64Measure +// provides M to convert an int64 into a measurement. +type Measurement struct { + v float64 + m Measure + desc *measureDescriptor +} + +// Value returns the value of the Measurement as a float64. +func (m Measurement) Value() float64 { + return m.v +} + +// Measure returns the Measure from which this Measurement was created. +func (m Measurement) Measure() Measure { + return m.m +} diff --git a/vendor/go.opencensus.io/stats/measure_float64.go b/vendor/go.opencensus.io/stats/measure_float64.go new file mode 100644 index 000000000..f02c1eda8 --- /dev/null +++ b/vendor/go.opencensus.io/stats/measure_float64.go @@ -0,0 +1,55 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// + +package stats + +// Float64Measure is a measure for float64 values. +type Float64Measure struct { + desc *measureDescriptor +} + +// M creates a new float64 measurement. +// Use Record to record measurements. +func (m *Float64Measure) M(v float64) Measurement { + return Measurement{ + m: m, + desc: m.desc, + v: v, + } +} + +// Float64 creates a new measure for float64 values. +// +// See the documentation for interface Measure for more guidance on the +// parameters of this function. +func Float64(name, description, unit string) *Float64Measure { + mi := registerMeasureHandle(name, description, unit) + return &Float64Measure{mi} +} + +// Name returns the name of the measure. +func (m *Float64Measure) Name() string { + return m.desc.name +} + +// Description returns the description of the measure. +func (m *Float64Measure) Description() string { + return m.desc.description +} + +// Unit returns the unit of the measure. +func (m *Float64Measure) Unit() string { + return m.desc.unit +} diff --git a/vendor/go.opencensus.io/stats/measure_int64.go b/vendor/go.opencensus.io/stats/measure_int64.go new file mode 100644 index 000000000..d101d7973 --- /dev/null +++ b/vendor/go.opencensus.io/stats/measure_int64.go @@ -0,0 +1,55 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package stats + +// Int64Measure is a measure for int64 values. +type Int64Measure struct { + desc *measureDescriptor +} + +// M creates a new int64 measurement. +// Use Record to record measurements. +func (m *Int64Measure) M(v int64) Measurement { + return Measurement{ + m: m, + desc: m.desc, + v: float64(v), + } +} + +// Int64 creates a new measure for int64 values. +// +// See the documentation for interface Measure for more guidance on the +// parameters of this function. +func Int64(name, description, unit string) *Int64Measure { + mi := registerMeasureHandle(name, description, unit) + return &Int64Measure{mi} +} + +// Name returns the name of the measure. +func (m *Int64Measure) Name() string { + return m.desc.name +} + +// Description returns the description of the measure. +func (m *Int64Measure) Description() string { + return m.desc.description +} + +// Unit returns the unit of the measure. +func (m *Int64Measure) Unit() string { + return m.desc.unit +} diff --git a/vendor/go.opencensus.io/stats/record.go b/vendor/go.opencensus.io/stats/record.go new file mode 100644 index 000000000..ad4691184 --- /dev/null +++ b/vendor/go.opencensus.io/stats/record.go @@ -0,0 +1,117 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package stats + +import ( + "context" + + "go.opencensus.io/metric/metricdata" + "go.opencensus.io/stats/internal" + "go.opencensus.io/tag" +) + +func init() { + internal.SubscriptionReporter = func(measure string) { + mu.Lock() + measures[measure].subscribe() + mu.Unlock() + } +} + +type recordOptions struct { + attachments metricdata.Attachments + mutators []tag.Mutator + measurements []Measurement +} + +// WithAttachments applies provided exemplar attachments. +func WithAttachments(attachments metricdata.Attachments) Options { + return func(ro *recordOptions) { + ro.attachments = attachments + } +} + +// WithTags applies provided tag mutators. +func WithTags(mutators ...tag.Mutator) Options { + return func(ro *recordOptions) { + ro.mutators = mutators + } +} + +// WithMeasurements applies provided measurements. +func WithMeasurements(measurements ...Measurement) Options { + return func(ro *recordOptions) { + ro.measurements = measurements + } +} + +// Options apply changes to recordOptions. +type Options func(*recordOptions) + +func createRecordOption(ros ...Options) *recordOptions { + o := &recordOptions{} + for _, ro := range ros { + ro(o) + } + return o +} + +// Record records one or multiple measurements with the same context at once. +// If there are any tags in the context, measurements will be tagged with them. +func Record(ctx context.Context, ms ...Measurement) { + RecordWithOptions(ctx, WithMeasurements(ms...)) +} + +// RecordWithTags records one or multiple measurements at once. +// +// Measurements will be tagged with the tags in the context mutated by the mutators. +// RecordWithTags is useful if you want to record with tag mutations but don't want +// to propagate the mutations in the context. 
+func RecordWithTags(ctx context.Context, mutators []tag.Mutator, ms ...Measurement) error { + return RecordWithOptions(ctx, WithTags(mutators...), WithMeasurements(ms...)) +} + +// RecordWithOptions records measurements from the given options (if any) against context +// and tags and attachments in the options (if any). +// If there are any tags in the context, measurements will be tagged with them. +func RecordWithOptions(ctx context.Context, ros ...Options) error { + o := createRecordOption(ros...) + if len(o.measurements) == 0 { + return nil + } + recorder := internal.DefaultRecorder + if recorder == nil { + return nil + } + record := false + for _, m := range o.measurements { + if m.desc.subscribed() { + record = true + break + } + } + if !record { + return nil + } + if len(o.mutators) > 0 { + var err error + if ctx, err = tag.New(ctx, o.mutators...); err != nil { + return err + } + } + recorder(tag.FromContext(ctx), o.measurements, o.attachments) + return nil +} diff --git a/vendor/go.opencensus.io/stats/units.go b/vendor/go.opencensus.io/stats/units.go new file mode 100644 index 000000000..6931a5f29 --- /dev/null +++ b/vendor/go.opencensus.io/stats/units.go @@ -0,0 +1,25 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package stats + +// Units are encoded according to the case-sensitive abbreviations from the +// Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html +const ( + UnitNone = "1" // Deprecated: Use UnitDimensionless. + UnitDimensionless = "1" + UnitBytes = "By" + UnitMilliseconds = "ms" +) diff --git a/vendor/go.opencensus.io/stats/view/aggregation.go b/vendor/go.opencensus.io/stats/view/aggregation.go new file mode 100644 index 000000000..b7f169b4a --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/aggregation.go @@ -0,0 +1,120 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package view + +// AggType represents the type of aggregation function used on a View. +type AggType int + +// All available aggregation types. +const ( + AggTypeNone AggType = iota // no aggregation; reserved for future use. + AggTypeCount // the count aggregation, see Count. + AggTypeSum // the sum aggregation, see Sum. + AggTypeDistribution // the distribution aggregation, see Distribution. + AggTypeLastValue // the last value aggregation, see LastValue. +) + +func (t AggType) String() string { + return aggTypeName[t] +} + +var aggTypeName = map[AggType]string{ + AggTypeNone: "None", + AggTypeCount: "Count", + AggTypeSum: "Sum", + AggTypeDistribution: "Distribution", + AggTypeLastValue: "LastValue", +} + +// Aggregation represents a data aggregation method. 
Use one of the functions: +// Count, Sum, or Distribution to construct an Aggregation. +type Aggregation struct { + Type AggType // Type is the AggType of this Aggregation. + Buckets []float64 // Buckets are the bucket endpoints if this Aggregation represents a distribution, see Distribution. + + newData func() AggregationData +} + +var ( + aggCount = &Aggregation{ + Type: AggTypeCount, + newData: func() AggregationData { + return &CountData{} + }, + } + aggSum = &Aggregation{ + Type: AggTypeSum, + newData: func() AggregationData { + return &SumData{} + }, + } +) + +// Count indicates that data collected and aggregated +// with this method will be turned into a count value. +// For example, total number of accepted requests can be +// aggregated by using Count. +func Count() *Aggregation { + return aggCount +} + +// Sum indicates that data collected and aggregated +// with this method will be summed up. +// For example, accumulated request bytes can be aggregated by using +// Sum. +func Sum() *Aggregation { + return aggSum +} + +// Distribution indicates that the desired aggregation is +// a histogram distribution. +// +// An distribution aggregation may contain a histogram of the values in the +// population. The bucket boundaries for that histogram are described +// by the bounds. This defines len(bounds)+1 buckets. +// +// If len(bounds) >= 2 then the boundaries for bucket index i are: +// +// [-infinity, bounds[i]) for i = 0 +// [bounds[i-1], bounds[i]) for 0 < i < length +// [bounds[i-1], +infinity) for i = length +// +// If len(bounds) is 0 then there is no histogram associated with the +// distribution. There will be a single bucket with boundaries +// (-infinity, +infinity). +// +// If len(bounds) is 1 then there is no finite buckets, and that single +// element is the common boundary of the overflow and underflow buckets. 
+func Distribution(bounds ...float64) *Aggregation { + return &Aggregation{ + Type: AggTypeDistribution, + Buckets: bounds, + newData: func() AggregationData { + return newDistributionData(bounds) + }, + } +} + +// LastValue only reports the last value recorded using this +// aggregation. All other measurements will be dropped. +func LastValue() *Aggregation { + return &Aggregation{ + Type: AggTypeLastValue, + newData: func() AggregationData { + return &LastValueData{} + }, + } +} diff --git a/vendor/go.opencensus.io/stats/view/aggregation_data.go b/vendor/go.opencensus.io/stats/view/aggregation_data.go new file mode 100644 index 000000000..d500e67f7 --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/aggregation_data.go @@ -0,0 +1,293 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package view + +import ( + "math" + "time" + + "go.opencensus.io/metric/metricdata" +) + +// AggregationData represents an aggregated value from a collection. +// They are reported on the view data during exporting. +// Mosts users won't directly access aggregration data. +type AggregationData interface { + isAggregationData() bool + addSample(v float64, attachments map[string]interface{}, t time.Time) + clone() AggregationData + equal(other AggregationData) bool + toPoint(t metricdata.Type, time time.Time) metricdata.Point +} + +const epsilon = 1e-9 + +// CountData is the aggregated data for the Count aggregation. 
+// A count aggregation processes data and counts the recordings. +// +// Most users won't directly access count data. +type CountData struct { + Value int64 +} + +func (a *CountData) isAggregationData() bool { return true } + +func (a *CountData) addSample(_ float64, _ map[string]interface{}, _ time.Time) { + a.Value = a.Value + 1 +} + +func (a *CountData) clone() AggregationData { + return &CountData{Value: a.Value} +} + +func (a *CountData) equal(other AggregationData) bool { + a2, ok := other.(*CountData) + if !ok { + return false + } + + return a.Value == a2.Value +} + +func (a *CountData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { + switch metricType { + case metricdata.TypeCumulativeInt64: + return metricdata.NewInt64Point(t, a.Value) + default: + panic("unsupported metricdata.Type") + } +} + +// SumData is the aggregated data for the Sum aggregation. +// A sum aggregation processes data and sums up the recordings. +// +// Most users won't directly access sum data. +type SumData struct { + Value float64 +} + +func (a *SumData) isAggregationData() bool { return true } + +func (a *SumData) addSample(v float64, _ map[string]interface{}, _ time.Time) { + a.Value += v +} + +func (a *SumData) clone() AggregationData { + return &SumData{Value: a.Value} +} + +func (a *SumData) equal(other AggregationData) bool { + a2, ok := other.(*SumData) + if !ok { + return false + } + return math.Pow(a.Value-a2.Value, 2) < epsilon +} + +func (a *SumData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { + switch metricType { + case metricdata.TypeCumulativeInt64: + return metricdata.NewInt64Point(t, int64(a.Value)) + case metricdata.TypeCumulativeFloat64: + return metricdata.NewFloat64Point(t, a.Value) + default: + panic("unsupported metricdata.Type") + } +} + +// DistributionData is the aggregated data for the +// Distribution aggregation. +// +// Most users won't directly access distribution data. 
+// +// For a distribution with N bounds, the associated DistributionData will have +// N+1 buckets. +type DistributionData struct { + Count int64 // number of data points aggregated + Min float64 // minimum value in the distribution + Max float64 // max value in the distribution + Mean float64 // mean of the distribution + SumOfSquaredDev float64 // sum of the squared deviation from the mean + CountPerBucket []int64 // number of occurrences per bucket + // ExemplarsPerBucket is slice the same length as CountPerBucket containing + // an exemplar for the associated bucket, or nil. + ExemplarsPerBucket []*metricdata.Exemplar + bounds []float64 // histogram distribution of the values +} + +func newDistributionData(bounds []float64) *DistributionData { + bucketCount := len(bounds) + 1 + return &DistributionData{ + CountPerBucket: make([]int64, bucketCount), + ExemplarsPerBucket: make([]*metricdata.Exemplar, bucketCount), + bounds: bounds, + Min: math.MaxFloat64, + Max: math.SmallestNonzeroFloat64, + } +} + +// Sum returns the sum of all samples collected. +func (a *DistributionData) Sum() float64 { return a.Mean * float64(a.Count) } + +func (a *DistributionData) variance() float64 { + if a.Count <= 1 { + return 0 + } + return a.SumOfSquaredDev / float64(a.Count-1) +} + +func (a *DistributionData) isAggregationData() bool { return true } + +// TODO(songy23): support exemplar attachments. 
// addSample folds one recorded value into the distribution: it updates the
// min/max, the per-bucket counts, and the running mean and sum of squared
// deviations via an incremental (Welford-style) update, so no raw samples
// need to be retained.
func (a *DistributionData) addSample(v float64, attachments map[string]interface{}, t time.Time) {
	if v < a.Min {
		a.Min = v
	}
	if v > a.Max {
		a.Max = v
	}
	a.Count++
	a.addToBucket(v, attachments, t)

	if a.Count == 1 {
		// First sample: the mean is the sample itself and the squared
		// deviation is zero, so skip the incremental update below.
		a.Mean = v
		return
	}

	oldMean := a.Mean
	a.Mean = a.Mean + (v-a.Mean)/float64(a.Count)
	a.SumOfSquaredDev = a.SumOfSquaredDev + (v-oldMean)*(v-a.Mean)
}

// addToBucket increments the count of the bucket that v falls into (the
// first bound strictly greater than v; values >= the last bound land in
// the overflow bucket) and, when the measurement carried attachments,
// stores an exemplar for that bucket.
func (a *DistributionData) addToBucket(v float64, attachments map[string]interface{}, t time.Time) {
	var count *int64
	var i int
	var b float64
	for i, b = range a.bounds {
		if v < b {
			count = &a.CountPerBucket[i]
			break
		}
	}
	if count == nil { // Last bucket.
		i = len(a.bounds)
		count = &a.CountPerBucket[i]
	}
	*count++
	if exemplar := getExemplar(v, attachments, t); exemplar != nil {
		a.ExemplarsPerBucket[i] = exemplar
	}
}

// getExemplar builds an exemplar for the sample, or returns nil when the
// measurement carried no attachments (nothing useful to attach).
func getExemplar(v float64, attachments map[string]interface{}, t time.Time) *metricdata.Exemplar {
	if len(attachments) == 0 {
		return nil
	}
	return &metricdata.Exemplar{
		Value:       v,
		Timestamp:   t,
		Attachments: attachments,
	}
}

// clone copies the struct and duplicates its two slices so the copy shares
// no mutable slice state with the original. The *Exemplar pointers are
// shared, but exemplars are replaced rather than mutated in addToBucket.
func (a *DistributionData) clone() AggregationData {
	c := *a
	c.CountPerBucket = append([]int64(nil), a.CountPerBucket...)
	c.ExemplarsPerBucket = append([]*metricdata.Exemplar(nil), a.ExemplarsPerBucket...)
	return &c
}

// equal reports whether other is a DistributionData with identical bucket
// counts, count, min and max; mean and variance are compared to within the
// package-wide epsilon tolerance.
func (a *DistributionData) equal(other AggregationData) bool {
	a2, ok := other.(*DistributionData)
	if !ok {
		return false
	}
	if a2 == nil {
		return false
	}
	if len(a.CountPerBucket) != len(a2.CountPerBucket) {
		return false
	}
	for i := range a.CountPerBucket {
		if a.CountPerBucket[i] != a2.CountPerBucket[i] {
			return false
		}
	}
	return a.Count == a2.Count && a.Min == a2.Min && a.Max == a2.Max && math.Pow(a.Mean-a2.Mean, 2) < epsilon && math.Pow(a.variance()-a2.variance(), 2) < epsilon
}

// toPoint converts the distribution into a metricdata.Point. Only the
// cumulative-distribution representation is currently supported.
func (a *DistributionData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point {
	switch metricType {
	case metricdata.TypeCumulativeDistribution:
		buckets := []metricdata.Bucket{}
		for i := 0; i < len(a.CountPerBucket); i++ {
			buckets = append(buckets, metricdata.Bucket{
				Count:    a.CountPerBucket[i],
				Exemplar: a.ExemplarsPerBucket[i],
			})
		}
		bucketOptions := &metricdata.BucketOptions{Bounds: a.bounds}

		val := &metricdata.Distribution{
			Count:                 a.Count,
			Sum:                   a.Sum(),
			SumOfSquaredDeviation: a.SumOfSquaredDev,
			BucketOptions:         bucketOptions,
			Buckets:               buckets,
		}
		return metricdata.NewDistributionPoint(t, val)

	default:
		// TODO: [rghetia] when we have a use case for TypeGaugeDistribution.
		panic("unsupported metricdata.Type")
	}
}

// LastValueData returns the last value recorded for LastValue aggregation.
+type LastValueData struct { + Value float64 +} + +func (l *LastValueData) isAggregationData() bool { + return true +} + +func (l *LastValueData) addSample(v float64, _ map[string]interface{}, _ time.Time) { + l.Value = v +} + +func (l *LastValueData) clone() AggregationData { + return &LastValueData{l.Value} +} + +func (l *LastValueData) equal(other AggregationData) bool { + a2, ok := other.(*LastValueData) + if !ok { + return false + } + return l.Value == a2.Value +} + +func (l *LastValueData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { + switch metricType { + case metricdata.TypeGaugeInt64: + return metricdata.NewInt64Point(t, int64(l.Value)) + case metricdata.TypeGaugeFloat64: + return metricdata.NewFloat64Point(t, l.Value) + default: + panic("unsupported metricdata.Type") + } +} diff --git a/vendor/go.opencensus.io/stats/view/collector.go b/vendor/go.opencensus.io/stats/view/collector.go new file mode 100644 index 000000000..8a6a2c0fd --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/collector.go @@ -0,0 +1,86 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package view + +import ( + "sort" + "time" + + "go.opencensus.io/internal/tagencoding" + "go.opencensus.io/tag" +) + +type collector struct { + // signatures holds the aggregations values for each unique tag signature + // (values for all keys) to its aggregator. 
+ signatures map[string]AggregationData + // Aggregation is the description of the aggregation to perform for this + // view. + a *Aggregation +} + +func (c *collector) addSample(s string, v float64, attachments map[string]interface{}, t time.Time) { + aggregator, ok := c.signatures[s] + if !ok { + aggregator = c.a.newData() + c.signatures[s] = aggregator + } + aggregator.addSample(v, attachments, t) +} + +// collectRows returns a snapshot of the collected Row values. +func (c *collector) collectedRows(keys []tag.Key) []*Row { + rows := make([]*Row, 0, len(c.signatures)) + for sig, aggregator := range c.signatures { + tags := decodeTags([]byte(sig), keys) + row := &Row{Tags: tags, Data: aggregator.clone()} + rows = append(rows, row) + } + return rows +} + +func (c *collector) clearRows() { + c.signatures = make(map[string]AggregationData) +} + +// encodeWithKeys encodes the map by using values +// only associated with the keys provided. +func encodeWithKeys(m *tag.Map, keys []tag.Key) []byte { + vb := &tagencoding.Values{ + Buffer: make([]byte, len(keys)), + } + for _, k := range keys { + v, _ := m.Value(k) + vb.WriteValue([]byte(v)) + } + return vb.Bytes() +} + +// decodeTags decodes tags from the buffer and +// orders them by the keys. 
+func decodeTags(buf []byte, keys []tag.Key) []tag.Tag { + vb := &tagencoding.Values{Buffer: buf} + var tags []tag.Tag + for _, k := range keys { + v := vb.ReadValue() + if v != nil { + tags = append(tags, tag.Tag{Key: k, Value: string(v)}) + } + } + vb.ReadIndex = 0 + sort.Slice(tags, func(i, j int) bool { return tags[i].Key.Name() < tags[j].Key.Name() }) + return tags +} diff --git a/vendor/go.opencensus.io/stats/view/doc.go b/vendor/go.opencensus.io/stats/view/doc.go new file mode 100644 index 000000000..dced225c3 --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/doc.go @@ -0,0 +1,47 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package view contains support for collecting and exposing aggregates over stats. +// +// In order to collect measurements, views need to be defined and registered. +// A view allows recorded measurements to be filtered and aggregated. +// +// All recorded measurements can be grouped by a list of tags. +// +// OpenCensus provides several aggregation methods: Count, Distribution and Sum. +// +// Count only counts the number of measurement points recorded. +// Distribution provides statistical summary of the aggregated data by counting +// how many recorded measurements fall into each bucket. +// Sum adds up the measurement values. +// LastValue just keeps track of the most recently recorded measurement value. +// All aggregations are cumulative. 
+// +// Views can be registerd and unregistered at any time during program execution. +// +// Libraries can define views but it is recommended that in most cases registering +// views be left up to applications. +// +// Exporting +// +// Collected and aggregated data can be exported to a metric collection +// backend by registering its exporter. +// +// Multiple exporters can be registered to upload the data to various +// different back ends. +package view // import "go.opencensus.io/stats/view" + +// TODO(acetechnologist): Add a link to the language independent OpenCensus +// spec when it is available. diff --git a/vendor/go.opencensus.io/stats/view/export.go b/vendor/go.opencensus.io/stats/view/export.go new file mode 100644 index 000000000..7cb59718f --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/export.go @@ -0,0 +1,58 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package view + +import "sync" + +var ( + exportersMu sync.RWMutex // guards exporters + exporters = make(map[Exporter]struct{}) +) + +// Exporter exports the collected records as view data. +// +// The ExportView method should return quickly; if an +// Exporter takes a significant amount of time to +// process a Data, that work should be done on another goroutine. +// +// It is safe to assume that ExportView will not be called concurrently from +// multiple goroutines. +// +// The Data should not be modified. 
+type Exporter interface { + ExportView(viewData *Data) +} + +// RegisterExporter registers an exporter. +// Collected data will be reported via all the +// registered exporters. Once you no longer +// want data to be exported, invoke UnregisterExporter +// with the previously registered exporter. +// +// Binaries can register exporters, libraries shouldn't register exporters. +func RegisterExporter(e Exporter) { + exportersMu.Lock() + defer exportersMu.Unlock() + + exporters[e] = struct{}{} +} + +// UnregisterExporter unregisters an exporter. +func UnregisterExporter(e Exporter) { + exportersMu.Lock() + defer exportersMu.Unlock() + + delete(exporters, e) +} diff --git a/vendor/go.opencensus.io/stats/view/view.go b/vendor/go.opencensus.io/stats/view/view.go new file mode 100644 index 000000000..37f88e1d9 --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/view.go @@ -0,0 +1,221 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package view + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "sort" + "sync/atomic" + "time" + + "go.opencensus.io/metric/metricdata" + "go.opencensus.io/stats" + "go.opencensus.io/tag" +) + +// View allows users to aggregate the recorded stats.Measurements. +// Views need to be passed to the Register function to be before data will be +// collected and sent to Exporters. +type View struct { + Name string // Name of View. Must be unique. 
If unset, will default to the name of the Measure. + Description string // Description is a human-readable description for this view. + + // TagKeys are the tag keys describing the grouping of this view. + // A single Row will be produced for each combination of associated tag values. + TagKeys []tag.Key + + // Measure is a stats.Measure to aggregate in this view. + Measure stats.Measure + + // Aggregation is the aggregation function tp apply to the set of Measurements. + Aggregation *Aggregation +} + +// WithName returns a copy of the View with a new name. This is useful for +// renaming views to cope with limitations placed on metric names by various +// backends. +func (v *View) WithName(name string) *View { + vNew := *v + vNew.Name = name + return &vNew +} + +// same compares two views and returns true if they represent the same aggregation. +func (v *View) same(other *View) bool { + if v == other { + return true + } + if v == nil { + return false + } + return reflect.DeepEqual(v.Aggregation, other.Aggregation) && + v.Measure.Name() == other.Measure.Name() +} + +// ErrNegativeBucketBounds error returned if histogram contains negative bounds. +// +// Deprecated: this should not be public. 
+var ErrNegativeBucketBounds = errors.New("negative bucket bounds not supported") + +// canonicalize canonicalizes v by setting explicit +// defaults for Name and Description and sorting the TagKeys +func (v *View) canonicalize() error { + if v.Measure == nil { + return fmt.Errorf("cannot register view %q: measure not set", v.Name) + } + if v.Aggregation == nil { + return fmt.Errorf("cannot register view %q: aggregation not set", v.Name) + } + if v.Name == "" { + v.Name = v.Measure.Name() + } + if v.Description == "" { + v.Description = v.Measure.Description() + } + if err := checkViewName(v.Name); err != nil { + return err + } + sort.Slice(v.TagKeys, func(i, j int) bool { + return v.TagKeys[i].Name() < v.TagKeys[j].Name() + }) + sort.Float64s(v.Aggregation.Buckets) + for _, b := range v.Aggregation.Buckets { + if b < 0 { + return ErrNegativeBucketBounds + } + } + // drop 0 bucket silently. + v.Aggregation.Buckets = dropZeroBounds(v.Aggregation.Buckets...) + + return nil +} + +func dropZeroBounds(bounds ...float64) []float64 { + for i, bound := range bounds { + if bound > 0 { + return bounds[i:] + } + } + return []float64{} +} + +// viewInternal is the internal representation of a View. +type viewInternal struct { + view *View // view is the canonicalized View definition associated with this view. + subscribed uint32 // 1 if someone is subscribed and data need to be exported, use atomic to access + collector *collector + metricDescriptor *metricdata.Descriptor +} + +func newViewInternal(v *View) (*viewInternal, error) { + return &viewInternal{ + view: v, + collector: &collector{make(map[string]AggregationData), v.Aggregation}, + metricDescriptor: viewToMetricDescriptor(v), + }, nil +} + +func (v *viewInternal) subscribe() { + atomic.StoreUint32(&v.subscribed, 1) +} + +func (v *viewInternal) unsubscribe() { + atomic.StoreUint32(&v.subscribed, 0) +} + +// isSubscribed returns true if the view is exporting +// data by subscription. 
+func (v *viewInternal) isSubscribed() bool { + return atomic.LoadUint32(&v.subscribed) == 1 +} + +func (v *viewInternal) clearRows() { + v.collector.clearRows() +} + +func (v *viewInternal) collectedRows() []*Row { + return v.collector.collectedRows(v.view.TagKeys) +} + +func (v *viewInternal) addSample(m *tag.Map, val float64, attachments map[string]interface{}, t time.Time) { + if !v.isSubscribed() { + return + } + sig := string(encodeWithKeys(m, v.view.TagKeys)) + v.collector.addSample(sig, val, attachments, t) +} + +// A Data is a set of rows about usage of the single measure associated +// with the given view. Each row is specific to a unique set of tags. +type Data struct { + View *View + Start, End time.Time + Rows []*Row +} + +// Row is the collected value for a specific set of key value pairs a.k.a tags. +type Row struct { + Tags []tag.Tag + Data AggregationData +} + +func (r *Row) String() string { + var buffer bytes.Buffer + buffer.WriteString("{ ") + buffer.WriteString("{ ") + for _, t := range r.Tags { + buffer.WriteString(fmt.Sprintf("{%v %v}", t.Key.Name(), t.Value)) + } + buffer.WriteString(" }") + buffer.WriteString(fmt.Sprintf("%v", r.Data)) + buffer.WriteString(" }") + return buffer.String() +} + +// Equal returns true if both rows are equal. Tags are expected to be ordered +// by the key name. Even both rows have the same tags but the tags appear in +// different orders it will return false. +func (r *Row) Equal(other *Row) bool { + if r == other { + return true + } + return reflect.DeepEqual(r.Tags, other.Tags) && r.Data.equal(other.Data) +} + +const maxNameLength = 255 + +// Returns true if the given string contains only printable characters. 
+func isPrintable(str string) bool { + for _, r := range str { + if !(r >= ' ' && r <= '~') { + return false + } + } + return true +} + +func checkViewName(name string) error { + if len(name) > maxNameLength { + return fmt.Errorf("view name cannot be larger than %v", maxNameLength) + } + if !isPrintable(name) { + return fmt.Errorf("view name needs to be an ASCII string") + } + return nil +} diff --git a/vendor/go.opencensus.io/stats/view/view_to_metric.go b/vendor/go.opencensus.io/stats/view/view_to_metric.go new file mode 100644 index 000000000..010f81bab --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/view_to_metric.go @@ -0,0 +1,140 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package view + +import ( + "time" + + "go.opencensus.io/metric/metricdata" + "go.opencensus.io/stats" +) + +func getUnit(unit string) metricdata.Unit { + switch unit { + case "1": + return metricdata.UnitDimensionless + case "ms": + return metricdata.UnitMilliseconds + case "By": + return metricdata.UnitBytes + } + return metricdata.UnitDimensionless +} + +func getType(v *View) metricdata.Type { + m := v.Measure + agg := v.Aggregation + + switch agg.Type { + case AggTypeSum: + switch m.(type) { + case *stats.Int64Measure: + return metricdata.TypeCumulativeInt64 + case *stats.Float64Measure: + return metricdata.TypeCumulativeFloat64 + default: + panic("unexpected measure type") + } + case AggTypeDistribution: + return metricdata.TypeCumulativeDistribution + case AggTypeLastValue: + switch m.(type) { + case *stats.Int64Measure: + return metricdata.TypeGaugeInt64 + case *stats.Float64Measure: + return metricdata.TypeGaugeFloat64 + default: + panic("unexpected measure type") + } + case AggTypeCount: + switch m.(type) { + case *stats.Int64Measure: + return metricdata.TypeCumulativeInt64 + case *stats.Float64Measure: + return metricdata.TypeCumulativeInt64 + default: + panic("unexpected measure type") + } + default: + panic("unexpected aggregation type") + } +} + +func getLableKeys(v *View) []metricdata.LabelKey { + labelKeys := []metricdata.LabelKey{} + for _, k := range v.TagKeys { + labelKeys = append(labelKeys, metricdata.LabelKey{Key: k.Name()}) + } + return labelKeys +} + +func viewToMetricDescriptor(v *View) *metricdata.Descriptor { + return &metricdata.Descriptor{ + Name: v.Name, + Description: v.Description, + Unit: getUnit(v.Measure.Unit()), + Type: getType(v), + LabelKeys: getLableKeys(v), + } +} + +func toLabelValues(row *Row, expectedKeys []metricdata.LabelKey) []metricdata.LabelValue { + labelValues := []metricdata.LabelValue{} + tagMap := make(map[string]string) + for _, tag := range row.Tags { + tagMap[tag.Key.Name()] = tag.Value + } + + for _, key 
:= range expectedKeys { + if val, ok := tagMap[key.Key]; ok { + labelValues = append(labelValues, metricdata.NewLabelValue(val)) + } else { + labelValues = append(labelValues, metricdata.LabelValue{}) + } + } + return labelValues +} + +func rowToTimeseries(v *viewInternal, row *Row, now time.Time, startTime time.Time) *metricdata.TimeSeries { + return &metricdata.TimeSeries{ + Points: []metricdata.Point{row.Data.toPoint(v.metricDescriptor.Type, now)}, + LabelValues: toLabelValues(row, v.metricDescriptor.LabelKeys), + StartTime: startTime, + } +} + +func viewToMetric(v *viewInternal, now time.Time, startTime time.Time) *metricdata.Metric { + if v.metricDescriptor.Type == metricdata.TypeGaugeInt64 || + v.metricDescriptor.Type == metricdata.TypeGaugeFloat64 { + startTime = time.Time{} + } + + rows := v.collectedRows() + if len(rows) == 0 { + return nil + } + + ts := []*metricdata.TimeSeries{} + for _, row := range rows { + ts = append(ts, rowToTimeseries(v, row, now, startTime)) + } + + m := &metricdata.Metric{ + Descriptor: *v.metricDescriptor, + TimeSeries: ts, + } + return m +} diff --git a/vendor/go.opencensus.io/stats/view/worker.go b/vendor/go.opencensus.io/stats/view/worker.go new file mode 100644 index 000000000..2f3c018af --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/worker.go @@ -0,0 +1,281 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package view + +import ( + "fmt" + "sync" + "time" + + "go.opencensus.io/metric/metricdata" + "go.opencensus.io/metric/metricproducer" + "go.opencensus.io/stats" + "go.opencensus.io/stats/internal" + "go.opencensus.io/tag" +) + +func init() { + defaultWorker = newWorker() + go defaultWorker.start() + internal.DefaultRecorder = record +} + +type measureRef struct { + measure string + views map[*viewInternal]struct{} +} + +type worker struct { + measures map[string]*measureRef + views map[string]*viewInternal + startTimes map[*viewInternal]time.Time + + timer *time.Ticker + c chan command + quit, done chan bool + mu sync.RWMutex +} + +var defaultWorker *worker + +var defaultReportingDuration = 10 * time.Second + +// Find returns a registered view associated with this name. +// If no registered view is found, nil is returned. +func Find(name string) (v *View) { + req := &getViewByNameReq{ + name: name, + c: make(chan *getViewByNameResp), + } + defaultWorker.c <- req + resp := <-req.c + return resp.v +} + +// Register begins collecting data for the given views. +// Once a view is registered, it reports data to the registered exporters. +func Register(views ...*View) error { + req := ®isterViewReq{ + views: views, + err: make(chan error), + } + defaultWorker.c <- req + return <-req.err +} + +// Unregister the given views. Data will not longer be exported for these views +// after Unregister returns. +// It is not necessary to unregister from views you expect to collect for the +// duration of your program execution. +func Unregister(views ...*View) { + names := make([]string, len(views)) + for i := range views { + names[i] = views[i].Name + } + req := &unregisterFromViewReq{ + views: names, + done: make(chan struct{}), + } + defaultWorker.c <- req + <-req.done +} + +// RetrieveData gets a snapshot of the data collected for the the view registered +// with the given name. It is intended for testing only. 
+func RetrieveData(viewName string) ([]*Row, error) { + req := &retrieveDataReq{ + now: time.Now(), + v: viewName, + c: make(chan *retrieveDataResp), + } + defaultWorker.c <- req + resp := <-req.c + return resp.rows, resp.err +} + +func record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) { + req := &recordReq{ + tm: tags, + ms: ms.([]stats.Measurement), + attachments: attachments, + t: time.Now(), + } + defaultWorker.c <- req +} + +// SetReportingPeriod sets the interval between reporting aggregated views in +// the program. If duration is less than or equal to zero, it enables the +// default behavior. +// +// Note: each exporter makes different promises about what the lowest supported +// duration is. For example, the Stackdriver exporter recommends a value no +// lower than 1 minute. Consult each exporter per your needs. +func SetReportingPeriod(d time.Duration) { + // TODO(acetechnologist): ensure that the duration d is more than a certain + // value. e.g. 1s + req := &setReportingPeriodReq{ + d: d, + c: make(chan bool), + } + defaultWorker.c <- req + <-req.c // don't return until the timer is set to the new duration. 
+} + +func newWorker() *worker { + return &worker{ + measures: make(map[string]*measureRef), + views: make(map[string]*viewInternal), + startTimes: make(map[*viewInternal]time.Time), + timer: time.NewTicker(defaultReportingDuration), + c: make(chan command, 1024), + quit: make(chan bool), + done: make(chan bool), + } +} + +func (w *worker) start() { + prodMgr := metricproducer.GlobalManager() + prodMgr.AddProducer(w) + + for { + select { + case cmd := <-w.c: + cmd.handleCommand(w) + case <-w.timer.C: + w.reportUsage(time.Now()) + case <-w.quit: + w.timer.Stop() + close(w.c) + w.done <- true + return + } + } +} + +func (w *worker) stop() { + prodMgr := metricproducer.GlobalManager() + prodMgr.DeleteProducer(w) + + w.quit <- true + <-w.done +} + +func (w *worker) getMeasureRef(name string) *measureRef { + if mr, ok := w.measures[name]; ok { + return mr + } + mr := &measureRef{ + measure: name, + views: make(map[*viewInternal]struct{}), + } + w.measures[name] = mr + return mr +} + +func (w *worker) tryRegisterView(v *View) (*viewInternal, error) { + w.mu.Lock() + defer w.mu.Unlock() + vi, err := newViewInternal(v) + if err != nil { + return nil, err + } + if x, ok := w.views[vi.view.Name]; ok { + if !x.view.same(vi.view) { + return nil, fmt.Errorf("cannot register view %q; a different view with the same name is already registered", v.Name) + } + + // the view is already registered so there is nothing to do and the + // command is considered successful. 
+ return x, nil + } + w.views[vi.view.Name] = vi + ref := w.getMeasureRef(vi.view.Measure.Name()) + ref.views[vi] = struct{}{} + return vi, nil +} + +func (w *worker) unregisterView(viewName string) { + w.mu.Lock() + defer w.mu.Unlock() + delete(w.views, viewName) +} + +func (w *worker) reportView(v *viewInternal, now time.Time) { + if !v.isSubscribed() { + return + } + rows := v.collectedRows() + _, ok := w.startTimes[v] + if !ok { + w.startTimes[v] = now + } + viewData := &Data{ + View: v.view, + Start: w.startTimes[v], + End: time.Now(), + Rows: rows, + } + exportersMu.Lock() + for e := range exporters { + e.ExportView(viewData) + } + exportersMu.Unlock() +} + +func (w *worker) reportUsage(now time.Time) { + w.mu.Lock() + defer w.mu.Unlock() + for _, v := range w.views { + w.reportView(v, now) + } +} + +func (w *worker) toMetric(v *viewInternal, now time.Time) *metricdata.Metric { + if !v.isSubscribed() { + return nil + } + + _, ok := w.startTimes[v] + if !ok { + w.startTimes[v] = now + } + + var startTime time.Time + if v.metricDescriptor.Type == metricdata.TypeGaugeInt64 || + v.metricDescriptor.Type == metricdata.TypeGaugeFloat64 { + startTime = time.Time{} + } else { + startTime = w.startTimes[v] + } + + return viewToMetric(v, now, startTime) +} + +// Read reads all view data and returns them as metrics. +// It is typically invoked by metric reader to export stats in metric format. 
+func (w *worker) Read() []*metricdata.Metric { + w.mu.Lock() + defer w.mu.Unlock() + now := time.Now() + metrics := make([]*metricdata.Metric, 0, len(w.views)) + for _, v := range w.views { + metric := w.toMetric(v, now) + if metric != nil { + metrics = append(metrics, metric) + } + } + return metrics +} diff --git a/vendor/go.opencensus.io/stats/view/worker_commands.go b/vendor/go.opencensus.io/stats/view/worker_commands.go new file mode 100644 index 000000000..0267e179a --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/worker_commands.go @@ -0,0 +1,186 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package view + +import ( + "errors" + "fmt" + "strings" + "time" + + "go.opencensus.io/stats" + "go.opencensus.io/stats/internal" + "go.opencensus.io/tag" +) + +type command interface { + handleCommand(w *worker) +} + +// getViewByNameReq is the command to get a view given its name. +type getViewByNameReq struct { + name string + c chan *getViewByNameResp +} + +type getViewByNameResp struct { + v *View +} + +func (cmd *getViewByNameReq) handleCommand(w *worker) { + v := w.views[cmd.name] + if v == nil { + cmd.c <- &getViewByNameResp{nil} + return + } + cmd.c <- &getViewByNameResp{v.view} +} + +// registerViewReq is the command to register a view. 
+type registerViewReq struct { + views []*View + err chan error +} + +func (cmd *registerViewReq) handleCommand(w *worker) { + for _, v := range cmd.views { + if err := v.canonicalize(); err != nil { + cmd.err <- err + return + } + } + var errstr []string + for _, view := range cmd.views { + vi, err := w.tryRegisterView(view) + if err != nil { + errstr = append(errstr, fmt.Sprintf("%s: %v", view.Name, err)) + continue + } + internal.SubscriptionReporter(view.Measure.Name()) + vi.subscribe() + } + if len(errstr) > 0 { + cmd.err <- errors.New(strings.Join(errstr, "\n")) + } else { + cmd.err <- nil + } +} + +// unregisterFromViewReq is the command to unregister to a view. Has no +// impact on the data collection for client that are pulling data from the +// library. +type unregisterFromViewReq struct { + views []string + done chan struct{} +} + +func (cmd *unregisterFromViewReq) handleCommand(w *worker) { + for _, name := range cmd.views { + vi, ok := w.views[name] + if !ok { + continue + } + + // Report pending data for this view before removing it. + w.reportView(vi, time.Now()) + + vi.unsubscribe() + if !vi.isSubscribed() { + // this was the last subscription and view is not collecting anymore. + // The collected data can be cleared. + vi.clearRows() + } + w.unregisterView(name) + } + cmd.done <- struct{}{} +} + +// retrieveDataReq is the command to retrieve data for a view. 
+type retrieveDataReq struct { + now time.Time + v string + c chan *retrieveDataResp +} + +type retrieveDataResp struct { + rows []*Row + err error +} + +func (cmd *retrieveDataReq) handleCommand(w *worker) { + w.mu.Lock() + defer w.mu.Unlock() + vi, ok := w.views[cmd.v] + if !ok { + cmd.c <- &retrieveDataResp{ + nil, + fmt.Errorf("cannot retrieve data; view %q is not registered", cmd.v), + } + return + } + + if !vi.isSubscribed() { + cmd.c <- &retrieveDataResp{ + nil, + fmt.Errorf("cannot retrieve data; view %q has no subscriptions or collection is not forcibly started", cmd.v), + } + return + } + cmd.c <- &retrieveDataResp{ + vi.collectedRows(), + nil, + } +} + +// recordReq is the command to record data related to multiple measures +// at once. +type recordReq struct { + tm *tag.Map + ms []stats.Measurement + attachments map[string]interface{} + t time.Time +} + +func (cmd *recordReq) handleCommand(w *worker) { + w.mu.Lock() + defer w.mu.Unlock() + for _, m := range cmd.ms { + if (m == stats.Measurement{}) { // not registered + continue + } + ref := w.getMeasureRef(m.Measure().Name()) + for v := range ref.views { + v.addSample(cmd.tm, m.Value(), cmd.attachments, time.Now()) + } + } +} + +// setReportingPeriodReq is the command to modify the duration between +// reporting the collected data to the registered clients. 
+type setReportingPeriodReq struct { + d time.Duration + c chan bool +} + +func (cmd *setReportingPeriodReq) handleCommand(w *worker) { + w.timer.Stop() + if cmd.d <= 0 { + w.timer = time.NewTicker(defaultReportingDuration) + } else { + w.timer = time.NewTicker(cmd.d) + } + cmd.c <- true +} diff --git a/vendor/go.opencensus.io/tag/context.go b/vendor/go.opencensus.io/tag/context.go new file mode 100644 index 000000000..b27d1b26b --- /dev/null +++ b/vendor/go.opencensus.io/tag/context.go @@ -0,0 +1,43 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package tag + +import ( + "context" +) + +// FromContext returns the tag map stored in the context. +func FromContext(ctx context.Context) *Map { + // The returned tag map shouldn't be mutated. + ts := ctx.Value(mapCtxKey) + if ts == nil { + return nil + } + return ts.(*Map) +} + +// NewContext creates a new context with the given tag map. +// To propagate a tag map to downstream methods and downstream RPCs, add a tag map +// to the current context. NewContext will return a copy of the current context, +// and put the tag map into the returned one. +// If there is already a tag map in the current context, it will be replaced with m. 
+func NewContext(ctx context.Context, m *Map) context.Context { + return context.WithValue(ctx, mapCtxKey, m) +} + +type ctxKey struct{} + +var mapCtxKey = ctxKey{} diff --git a/vendor/go.opencensus.io/tag/doc.go b/vendor/go.opencensus.io/tag/doc.go new file mode 100644 index 000000000..da16b74e4 --- /dev/null +++ b/vendor/go.opencensus.io/tag/doc.go @@ -0,0 +1,26 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +/* +Package tag contains OpenCensus tags. + +Tags are key-value pairs. Tags provide additional cardinality to +the OpenCensus instrumentation data. + +Tags can be propagated on the wire and in the same +process via context.Context. Encode and Decode should be +used to represent tags into their binary propagation form. +*/ +package tag // import "go.opencensus.io/tag" diff --git a/vendor/go.opencensus.io/tag/key.go b/vendor/go.opencensus.io/tag/key.go new file mode 100644 index 000000000..ebbed9500 --- /dev/null +++ b/vendor/go.opencensus.io/tag/key.go @@ -0,0 +1,35 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package tag + +// Key represents a tag key. +type Key struct { + name string +} + +// NewKey creates or retrieves a string key identified by name. +// Calling NewKey consequently with the same name returns the same key. +func NewKey(name string) (Key, error) { + if !checkKeyName(name) { + return Key{}, errInvalidKeyName + } + return Key{name: name}, nil +} + +// Name returns the name of the key. +func (k Key) Name() string { + return k.name +} diff --git a/vendor/go.opencensus.io/tag/map.go b/vendor/go.opencensus.io/tag/map.go new file mode 100644 index 000000000..0272ef85a --- /dev/null +++ b/vendor/go.opencensus.io/tag/map.go @@ -0,0 +1,229 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package tag + +import ( + "bytes" + "context" + "fmt" + "sort" +) + +// Tag is a key value pair that can be propagated on wire. +type Tag struct { + Key Key + Value string +} + +type tagContent struct { + value string + m metadatas +} + +// Map is a map of tags. 
Use New to create a context containing +// a new Map. +type Map struct { + m map[Key]tagContent +} + +// Value returns the value for the key if a value for the key exists. +func (m *Map) Value(k Key) (string, bool) { + if m == nil { + return "", false + } + v, ok := m.m[k] + return v.value, ok +} + +func (m *Map) String() string { + if m == nil { + return "nil" + } + keys := make([]Key, 0, len(m.m)) + for k := range m.m { + keys = append(keys, k) + } + sort.Slice(keys, func(i, j int) bool { return keys[i].Name() < keys[j].Name() }) + + var buffer bytes.Buffer + buffer.WriteString("{ ") + for _, k := range keys { + buffer.WriteString(fmt.Sprintf("{%v %v}", k.name, m.m[k])) + } + buffer.WriteString(" }") + return buffer.String() +} + +func (m *Map) insert(k Key, v string, md metadatas) { + if _, ok := m.m[k]; ok { + return + } + m.m[k] = tagContent{value: v, m: md} +} + +func (m *Map) update(k Key, v string, md metadatas) { + if _, ok := m.m[k]; ok { + m.m[k] = tagContent{value: v, m: md} + } +} + +func (m *Map) upsert(k Key, v string, md metadatas) { + m.m[k] = tagContent{value: v, m: md} +} + +func (m *Map) delete(k Key) { + delete(m.m, k) +} + +func newMap() *Map { + return &Map{m: make(map[Key]tagContent)} +} + +// Mutator modifies a tag map. +type Mutator interface { + Mutate(t *Map) (*Map, error) +} + +// Insert returns a mutator that inserts a +// value associated with k. If k already exists in the tag map, +// mutator doesn't update the value. +// Metadata applies metadata to the tag. It is optional. +// Metadatas are applied in the order in which it is provided. +// If more than one metadata updates the same attribute then +// the update from the last metadata prevails. 
+func Insert(k Key, v string, mds ...Metadata) Mutator { + return &mutator{ + fn: func(m *Map) (*Map, error) { + if !checkValue(v) { + return nil, errInvalidValue + } + m.insert(k, v, createMetadatas(mds...)) + return m, nil + }, + } +} + +// Update returns a mutator that updates the +// value of the tag associated with k with v. If k doesn't +// exists in the tag map, the mutator doesn't insert the value. +// Metadata applies metadata to the tag. It is optional. +// Metadatas are applied in the order in which it is provided. +// If more than one metadata updates the same attribute then +// the update from the last metadata prevails. +func Update(k Key, v string, mds ...Metadata) Mutator { + return &mutator{ + fn: func(m *Map) (*Map, error) { + if !checkValue(v) { + return nil, errInvalidValue + } + m.update(k, v, createMetadatas(mds...)) + return m, nil + }, + } +} + +// Upsert returns a mutator that upserts the +// value of the tag associated with k with v. It inserts the +// value if k doesn't exist already. It mutates the value +// if k already exists. +// Metadata applies metadata to the tag. It is optional. +// Metadatas are applied in the order in which it is provided. +// If more than one metadata updates the same attribute then +// the update from the last metadata prevails. +func Upsert(k Key, v string, mds ...Metadata) Mutator { + return &mutator{ + fn: func(m *Map) (*Map, error) { + if !checkValue(v) { + return nil, errInvalidValue + } + m.upsert(k, v, createMetadatas(mds...)) + return m, nil + }, + } +} + +func createMetadatas(mds ...Metadata) metadatas { + var metas metadatas + if len(mds) > 0 { + for _, md := range mds { + if md != nil { + md(&metas) + } + } + } else { + WithTTL(TTLUnlimitedPropagation)(&metas) + } + return metas + +} + +// Delete returns a mutator that deletes +// the value associated with k. 
+func Delete(k Key) Mutator { + return &mutator{ + fn: func(m *Map) (*Map, error) { + m.delete(k) + return m, nil + }, + } +} + +// New returns a new context that contains a tag map +// originated from the incoming context and modified +// with the provided mutators. +func New(ctx context.Context, mutator ...Mutator) (context.Context, error) { + m := newMap() + orig := FromContext(ctx) + if orig != nil { + for k, v := range orig.m { + if !checkKeyName(k.Name()) { + return ctx, fmt.Errorf("key:%q: %v", k, errInvalidKeyName) + } + if !checkValue(v.value) { + return ctx, fmt.Errorf("key:%q value:%q: %v", k.Name(), v, errInvalidValue) + } + m.insert(k, v.value, v.m) + } + } + var err error + for _, mod := range mutator { + m, err = mod.Mutate(m) + if err != nil { + return ctx, err + } + } + return NewContext(ctx, m), nil +} + +// Do is similar to pprof.Do: a convenience for installing the tags +// from the context as Go profiler labels. This allows you to +// correlated runtime profiling with stats. +// +// It converts the key/values from the given map to Go profiler labels +// and calls pprof.Do. +// +// Do is going to do nothing if your Go version is below 1.9. +func Do(ctx context.Context, f func(ctx context.Context)) { + do(ctx, f) +} + +type mutator struct { + fn func(t *Map) (*Map, error) +} + +func (m *mutator) Mutate(t *Map) (*Map, error) { + return m.fn(t) +} diff --git a/vendor/go.opencensus.io/tag/map_codec.go b/vendor/go.opencensus.io/tag/map_codec.go new file mode 100644 index 000000000..f8b582761 --- /dev/null +++ b/vendor/go.opencensus.io/tag/map_codec.go @@ -0,0 +1,239 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package tag + +import ( + "encoding/binary" + "fmt" +) + +// KeyType defines the types of keys allowed. Currently only keyTypeString is +// supported. +type keyType byte + +const ( + keyTypeString keyType = iota + keyTypeInt64 + keyTypeTrue + keyTypeFalse + + tagsVersionID = byte(0) +) + +type encoderGRPC struct { + buf []byte + writeIdx, readIdx int +} + +// writeKeyString writes the fieldID '0' followed by the key string and value +// string. +func (eg *encoderGRPC) writeTagString(k, v string) { + eg.writeByte(byte(keyTypeString)) + eg.writeStringWithVarintLen(k) + eg.writeStringWithVarintLen(v) +} + +func (eg *encoderGRPC) writeTagUint64(k string, i uint64) { + eg.writeByte(byte(keyTypeInt64)) + eg.writeStringWithVarintLen(k) + eg.writeUint64(i) +} + +func (eg *encoderGRPC) writeTagTrue(k string) { + eg.writeByte(byte(keyTypeTrue)) + eg.writeStringWithVarintLen(k) +} + +func (eg *encoderGRPC) writeTagFalse(k string) { + eg.writeByte(byte(keyTypeFalse)) + eg.writeStringWithVarintLen(k) +} + +func (eg *encoderGRPC) writeBytesWithVarintLen(bytes []byte) { + length := len(bytes) + + eg.growIfRequired(binary.MaxVarintLen64 + length) + eg.writeIdx += binary.PutUvarint(eg.buf[eg.writeIdx:], uint64(length)) + copy(eg.buf[eg.writeIdx:], bytes) + eg.writeIdx += length +} + +func (eg *encoderGRPC) writeStringWithVarintLen(s string) { + length := len(s) + + eg.growIfRequired(binary.MaxVarintLen64 + length) + eg.writeIdx += binary.PutUvarint(eg.buf[eg.writeIdx:], uint64(length)) + copy(eg.buf[eg.writeIdx:], s) + eg.writeIdx += length +} + +func (eg 
*encoderGRPC) writeByte(v byte) { + eg.growIfRequired(1) + eg.buf[eg.writeIdx] = v + eg.writeIdx++ +} + +func (eg *encoderGRPC) writeUint32(i uint32) { + eg.growIfRequired(4) + binary.LittleEndian.PutUint32(eg.buf[eg.writeIdx:], i) + eg.writeIdx += 4 +} + +func (eg *encoderGRPC) writeUint64(i uint64) { + eg.growIfRequired(8) + binary.LittleEndian.PutUint64(eg.buf[eg.writeIdx:], i) + eg.writeIdx += 8 +} + +func (eg *encoderGRPC) readByte() byte { + b := eg.buf[eg.readIdx] + eg.readIdx++ + return b +} + +func (eg *encoderGRPC) readUint32() uint32 { + i := binary.LittleEndian.Uint32(eg.buf[eg.readIdx:]) + eg.readIdx += 4 + return i +} + +func (eg *encoderGRPC) readUint64() uint64 { + i := binary.LittleEndian.Uint64(eg.buf[eg.readIdx:]) + eg.readIdx += 8 + return i +} + +func (eg *encoderGRPC) readBytesWithVarintLen() ([]byte, error) { + if eg.readEnded() { + return nil, fmt.Errorf("unexpected end while readBytesWithVarintLen '%x' starting at idx '%v'", eg.buf, eg.readIdx) + } + length, valueStart := binary.Uvarint(eg.buf[eg.readIdx:]) + if valueStart <= 0 { + return nil, fmt.Errorf("unexpected end while readBytesWithVarintLen '%x' starting at idx '%v'", eg.buf, eg.readIdx) + } + + valueStart += eg.readIdx + valueEnd := valueStart + int(length) + if valueEnd > len(eg.buf) { + return nil, fmt.Errorf("malformed encoding: length:%v, upper:%v, maxLength:%v", length, valueEnd, len(eg.buf)) + } + + eg.readIdx = valueEnd + return eg.buf[valueStart:valueEnd], nil +} + +func (eg *encoderGRPC) readStringWithVarintLen() (string, error) { + bytes, err := eg.readBytesWithVarintLen() + if err != nil { + return "", err + } + return string(bytes), nil +} + +func (eg *encoderGRPC) growIfRequired(expected int) { + if len(eg.buf)-eg.writeIdx < expected { + tmp := make([]byte, 2*(len(eg.buf)+1)+expected) + copy(tmp, eg.buf) + eg.buf = tmp + } +} + +func (eg *encoderGRPC) readEnded() bool { + return eg.readIdx >= len(eg.buf) +} + +func (eg *encoderGRPC) bytes() []byte { + return 
eg.buf[:eg.writeIdx] +} + +// Encode encodes the tag map into a []byte. It is useful to propagate +// the tag maps on wire in binary format. +func Encode(m *Map) []byte { + if m == nil { + return nil + } + eg := &encoderGRPC{ + buf: make([]byte, len(m.m)), + } + eg.writeByte(byte(tagsVersionID)) + for k, v := range m.m { + if v.m.ttl.ttl == valueTTLUnlimitedPropagation { + eg.writeByte(byte(keyTypeString)) + eg.writeStringWithVarintLen(k.name) + eg.writeBytesWithVarintLen([]byte(v.value)) + } + } + return eg.bytes() +} + +// Decode decodes the given []byte into a tag map. +func Decode(bytes []byte) (*Map, error) { + ts := newMap() + err := DecodeEach(bytes, ts.upsert) + if err != nil { + // no partial failures + return nil, err + } + return ts, nil +} + +// DecodeEach decodes the given serialized tag map, calling handler for each +// tag key and value decoded. +func DecodeEach(bytes []byte, fn func(key Key, val string, md metadatas)) error { + eg := &encoderGRPC{ + buf: bytes, + } + if len(eg.buf) == 0 { + return nil + } + + version := eg.readByte() + if version > tagsVersionID { + return fmt.Errorf("cannot decode: unsupported version: %q; supports only up to: %q", version, tagsVersionID) + } + + for !eg.readEnded() { + typ := keyType(eg.readByte()) + + if typ != keyTypeString { + return fmt.Errorf("cannot decode: invalid key type: %q", typ) + } + + k, err := eg.readBytesWithVarintLen() + if err != nil { + return err + } + + v, err := eg.readBytesWithVarintLen() + if err != nil { + return err + } + + key, err := NewKey(string(k)) + if err != nil { + return err + } + val := string(v) + if !checkValue(val) { + return errInvalidValue + } + fn(key, val, createMetadatas(WithTTL(TTLUnlimitedPropagation))) + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/go.opencensus.io/tag/metadata.go b/vendor/go.opencensus.io/tag/metadata.go new file mode 100644 index 000000000..6571a583e --- /dev/null +++ b/vendor/go.opencensus.io/tag/metadata.go @@ -0,0 
+1,52 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package tag + +const ( + // valueTTLNoPropagation prevents tag from propagating. + valueTTLNoPropagation = 0 + + // valueTTLUnlimitedPropagation allows tag to propagate without any limits on number of hops. + valueTTLUnlimitedPropagation = -1 +) + +// TTL is metadata that specifies number of hops a tag can propagate. +// Details about TTL metadata is specified at https://github.com/census-instrumentation/opencensus-specs/blob/master/tags/TagMap.md#tagmetadata +type TTL struct { + ttl int +} + +var ( + // TTLUnlimitedPropagation is TTL metadata that allows tag to propagate without any limits on number of hops. + TTLUnlimitedPropagation = TTL{ttl: valueTTLUnlimitedPropagation} + + // TTLNoPropagation is TTL metadata that prevents tag from propagating. + TTLNoPropagation = TTL{ttl: valueTTLNoPropagation} +) + +type metadatas struct { + ttl TTL +} + +// Metadata applies metadatas specified by the function. +type Metadata func(*metadatas) + +// WithTTL applies metadata with provided ttl. 
+func WithTTL(ttl TTL) Metadata { + return func(m *metadatas) { + m.ttl = ttl + } +} diff --git a/vendor/go.opencensus.io/tag/profile_19.go b/vendor/go.opencensus.io/tag/profile_19.go new file mode 100644 index 000000000..b34d95e34 --- /dev/null +++ b/vendor/go.opencensus.io/tag/profile_19.go @@ -0,0 +1,31 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.9 + +package tag + +import ( + "context" + "runtime/pprof" +) + +func do(ctx context.Context, f func(ctx context.Context)) { + m := FromContext(ctx) + keyvals := make([]string, 0, 2*len(m.m)) + for k, v := range m.m { + keyvals = append(keyvals, k.Name(), v.value) + } + pprof.Do(ctx, pprof.Labels(keyvals...), f) +} diff --git a/vendor/go.opencensus.io/tag/profile_not19.go b/vendor/go.opencensus.io/tag/profile_not19.go new file mode 100644 index 000000000..83adbce56 --- /dev/null +++ b/vendor/go.opencensus.io/tag/profile_not19.go @@ -0,0 +1,23 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !go1.9 + +package tag + +import "context" + +func do(ctx context.Context, f func(ctx context.Context)) { + f(ctx) +} diff --git a/vendor/go.opencensus.io/tag/validate.go b/vendor/go.opencensus.io/tag/validate.go new file mode 100644 index 000000000..0939fc674 --- /dev/null +++ b/vendor/go.opencensus.io/tag/validate.go @@ -0,0 +1,56 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tag + +import "errors" + +const ( + maxKeyLength = 255 + + // valid are restricted to US-ASCII subset (range 0x20 (' ') to 0x7e ('~')). 
+ validKeyValueMin = 32 + validKeyValueMax = 126 +) + +var ( + errInvalidKeyName = errors.New("invalid key name: only ASCII characters accepted; max length must be 255 characters") + errInvalidValue = errors.New("invalid value: only ASCII characters accepted; max length must be 255 characters") +) + +func checkKeyName(name string) bool { + if len(name) == 0 { + return false + } + if len(name) > maxKeyLength { + return false + } + return isASCII(name) +} + +func isASCII(s string) bool { + for _, c := range s { + if (c < validKeyValueMin) || (c > validKeyValueMax) { + return false + } + } + return true +} + +func checkValue(v string) bool { + if len(v) > maxKeyLength { + return false + } + return isASCII(v) +} diff --git a/vendor/go.opencensus.io/trace/basetypes.go b/vendor/go.opencensus.io/trace/basetypes.go new file mode 100644 index 000000000..0c54492a2 --- /dev/null +++ b/vendor/go.opencensus.io/trace/basetypes.go @@ -0,0 +1,119 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "fmt" + "time" +) + +type ( + // TraceID is a 16-byte identifier for a set of spans. + TraceID [16]byte + + // SpanID is an 8-byte identifier for a single span. 
+ SpanID [8]byte +) + +func (t TraceID) String() string { + return fmt.Sprintf("%02x", t[:]) +} + +func (s SpanID) String() string { + return fmt.Sprintf("%02x", s[:]) +} + +// Annotation represents a text annotation with a set of attributes and a timestamp. +type Annotation struct { + Time time.Time + Message string + Attributes map[string]interface{} +} + +// Attribute represents a key-value pair on a span, link or annotation. +// Construct with one of: BoolAttribute, Int64Attribute, or StringAttribute. +type Attribute struct { + key string + value interface{} +} + +// BoolAttribute returns a bool-valued attribute. +func BoolAttribute(key string, value bool) Attribute { + return Attribute{key: key, value: value} +} + +// Int64Attribute returns an int64-valued attribute. +func Int64Attribute(key string, value int64) Attribute { + return Attribute{key: key, value: value} +} + +// Float64Attribute returns a float64-valued attribute. +func Float64Attribute(key string, value float64) Attribute { + return Attribute{key: key, value: value} +} + +// StringAttribute returns a string-valued attribute. +func StringAttribute(key string, value string) Attribute { + return Attribute{key: key, value: value} +} + +// LinkType specifies the relationship between the span that had the link +// added, and the linked span. +type LinkType int32 + +// LinkType values. +const ( + LinkTypeUnspecified LinkType = iota // The relationship of the two spans is unknown. + LinkTypeChild // The linked span is a child of the current span. + LinkTypeParent // The linked span is the parent of the current span. +) + +// Link represents a reference from one span to another span. +type Link struct { + TraceID TraceID + SpanID SpanID + Type LinkType + // Attributes is a set of attributes on the link. + Attributes map[string]interface{} +} + +// MessageEventType specifies the type of message event. +type MessageEventType int32 + +// MessageEventType values. 
+const ( + MessageEventTypeUnspecified MessageEventType = iota // Unknown event type. + MessageEventTypeSent // Indicates a sent RPC message. + MessageEventTypeRecv // Indicates a received RPC message. +) + +// MessageEvent represents an event describing a message sent or received on the network. +type MessageEvent struct { + Time time.Time + EventType MessageEventType + MessageID int64 + UncompressedByteSize int64 + CompressedByteSize int64 +} + +// Status is the status of a Span. +type Status struct { + // Code is a status code. Zero indicates success. + // + // If Code will be propagated to Google APIs, it ideally should be a value from + // https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto . + Code int32 + Message string +} diff --git a/vendor/go.opencensus.io/trace/config.go b/vendor/go.opencensus.io/trace/config.go new file mode 100644 index 000000000..775f8274f --- /dev/null +++ b/vendor/go.opencensus.io/trace/config.go @@ -0,0 +1,86 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "sync" + + "go.opencensus.io/trace/internal" +) + +// Config represents the global tracing configuration. +type Config struct { + // DefaultSampler is the default sampler used when creating new spans. + DefaultSampler Sampler + + // IDGenerator is for internal use only. 
+ IDGenerator internal.IDGenerator + + // MaxAnnotationEventsPerSpan is max number of annotation events per span + MaxAnnotationEventsPerSpan int + + // MaxMessageEventsPerSpan is max number of message events per span + MaxMessageEventsPerSpan int + + // MaxAnnotationEventsPerSpan is max number of attributes per span + MaxAttributesPerSpan int + + // MaxLinksPerSpan is max number of links per span + MaxLinksPerSpan int +} + +var configWriteMu sync.Mutex + +const ( + // DefaultMaxAnnotationEventsPerSpan is default max number of annotation events per span + DefaultMaxAnnotationEventsPerSpan = 32 + + // DefaultMaxMessageEventsPerSpan is default max number of message events per span + DefaultMaxMessageEventsPerSpan = 128 + + // DefaultMaxAttributesPerSpan is default max number of attributes per span + DefaultMaxAttributesPerSpan = 32 + + // DefaultMaxLinksPerSpan is default max number of links per span + DefaultMaxLinksPerSpan = 32 +) + +// ApplyConfig applies changes to the global tracing configuration. +// +// Fields not provided in the given config are going to be preserved. 
+func ApplyConfig(cfg Config) { + configWriteMu.Lock() + defer configWriteMu.Unlock() + c := *config.Load().(*Config) + if cfg.DefaultSampler != nil { + c.DefaultSampler = cfg.DefaultSampler + } + if cfg.IDGenerator != nil { + c.IDGenerator = cfg.IDGenerator + } + if cfg.MaxAnnotationEventsPerSpan > 0 { + c.MaxAnnotationEventsPerSpan = cfg.MaxAnnotationEventsPerSpan + } + if cfg.MaxMessageEventsPerSpan > 0 { + c.MaxMessageEventsPerSpan = cfg.MaxMessageEventsPerSpan + } + if cfg.MaxAttributesPerSpan > 0 { + c.MaxAttributesPerSpan = cfg.MaxAttributesPerSpan + } + if cfg.MaxLinksPerSpan > 0 { + c.MaxLinksPerSpan = cfg.MaxLinksPerSpan + } + config.Store(&c) +} diff --git a/vendor/go.opencensus.io/trace/doc.go b/vendor/go.opencensus.io/trace/doc.go new file mode 100644 index 000000000..04b1ee4f3 --- /dev/null +++ b/vendor/go.opencensus.io/trace/doc.go @@ -0,0 +1,53 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package trace contains support for OpenCensus distributed tracing. + +The following assumes a basic familiarity with OpenCensus concepts. +See http://opencensus.io + + +Exporting Traces + +To export collected tracing data, register at least one exporter. You can use +one of the provided exporters or write your own. + + trace.RegisterExporter(exporter) + +By default, traces will be sampled relatively rarely. To change the sampling +frequency for your entire program, call ApplyConfig. 
Use a ProbabilitySampler +to sample a subset of traces, or use AlwaysSample to collect a trace on every run: + + trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) + +Be careful about using trace.AlwaysSample in a production application with +significant traffic: a new trace will be started and exported for every request. + +Adding Spans to a Trace + +A trace consists of a tree of spans. In Go, the current span is carried in a +context.Context. + +It is common to want to capture all the activity of a function call in a span. For +this to work, the function must take a context.Context as a parameter. Add these two +lines to the top of the function: + + ctx, span := trace.StartSpan(ctx, "example.com/Run") + defer span.End() + +StartSpan will create a new top-level span if the context +doesn't contain another span, otherwise it will create a child span. +*/ +package trace // import "go.opencensus.io/trace" diff --git a/vendor/go.opencensus.io/trace/evictedqueue.go b/vendor/go.opencensus.io/trace/evictedqueue.go new file mode 100644 index 000000000..ffc264f23 --- /dev/null +++ b/vendor/go.opencensus.io/trace/evictedqueue.go @@ -0,0 +1,38 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package trace + +type evictedQueue struct { + queue []interface{} + capacity int + droppedCount int +} + +func newEvictedQueue(capacity int) *evictedQueue { + eq := &evictedQueue{ + capacity: capacity, + queue: make([]interface{}, 0), + } + + return eq +} + +func (eq *evictedQueue) add(value interface{}) { + if len(eq.queue) == eq.capacity { + eq.queue = eq.queue[1:] + eq.droppedCount++ + } + eq.queue = append(eq.queue, value) +} diff --git a/vendor/go.opencensus.io/trace/export.go b/vendor/go.opencensus.io/trace/export.go new file mode 100644 index 000000000..e0d9a4b99 --- /dev/null +++ b/vendor/go.opencensus.io/trace/export.go @@ -0,0 +1,97 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "sync" + "sync/atomic" + "time" +) + +// Exporter is a type for functions that receive sampled trace spans. +// +// The ExportSpan method should be safe for concurrent use and should return +// quickly; if an Exporter takes a significant amount of time to process a +// SpanData, that work should be done on another goroutine. +// +// The SpanData should not be modified, but a pointer to it can be kept. +type Exporter interface { + ExportSpan(s *SpanData) +} + +type exportersMap map[Exporter]struct{} + +var ( + exporterMu sync.Mutex + exporters atomic.Value +) + +// RegisterExporter adds to the list of Exporters that will receive sampled +// trace spans. 
+// +// Binaries can register exporters, libraries shouldn't register exporters. +func RegisterExporter(e Exporter) { + exporterMu.Lock() + new := make(exportersMap) + if old, ok := exporters.Load().(exportersMap); ok { + for k, v := range old { + new[k] = v + } + } + new[e] = struct{}{} + exporters.Store(new) + exporterMu.Unlock() +} + +// UnregisterExporter removes from the list of Exporters the Exporter that was +// registered with the given name. +func UnregisterExporter(e Exporter) { + exporterMu.Lock() + new := make(exportersMap) + if old, ok := exporters.Load().(exportersMap); ok { + for k, v := range old { + new[k] = v + } + } + delete(new, e) + exporters.Store(new) + exporterMu.Unlock() +} + +// SpanData contains all the information collected by a Span. +type SpanData struct { + SpanContext + ParentSpanID SpanID + SpanKind int + Name string + StartTime time.Time + // The wall clock time of EndTime will be adjusted to always be offset + // from StartTime by the duration of the span. + EndTime time.Time + // The values of Attributes each have type string, bool, or int64. + Attributes map[string]interface{} + Annotations []Annotation + MessageEvents []MessageEvent + Status + Links []Link + HasRemoteParent bool + DroppedAttributeCount int + DroppedAnnotationCount int + DroppedMessageEventCount int + DroppedLinkCount int + + // ChildSpanCount holds the number of child span created for this span. + ChildSpanCount int +} diff --git a/vendor/go.opencensus.io/trace/internal/internal.go b/vendor/go.opencensus.io/trace/internal/internal.go new file mode 100644 index 000000000..7e808d8f3 --- /dev/null +++ b/vendor/go.opencensus.io/trace/internal/internal.go @@ -0,0 +1,22 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package internal provides trace internals. +package internal + +// IDGenerator allows custom generators for TraceId and SpanId. +type IDGenerator interface { + NewTraceID() [16]byte + NewSpanID() [8]byte +} diff --git a/vendor/go.opencensus.io/trace/lrumap.go b/vendor/go.opencensus.io/trace/lrumap.go new file mode 100644 index 000000000..3f80a3368 --- /dev/null +++ b/vendor/go.opencensus.io/trace/lrumap.go @@ -0,0 +1,37 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package trace + +import ( + "github.com/hashicorp/golang-lru/simplelru" +) + +type lruMap struct { + simpleLruMap *simplelru.LRU + droppedCount int +} + +func newLruMap(size int) *lruMap { + lm := &lruMap{} + lm.simpleLruMap, _ = simplelru.NewLRU(size, nil) + return lm +} + +func (lm *lruMap) add(key, value interface{}) { + evicted := lm.simpleLruMap.Add(key, value) + if evicted { + lm.droppedCount++ + } +} diff --git a/vendor/go.opencensus.io/trace/propagation/propagation.go b/vendor/go.opencensus.io/trace/propagation/propagation.go new file mode 100644 index 000000000..1eb190a96 --- /dev/null +++ b/vendor/go.opencensus.io/trace/propagation/propagation.go @@ -0,0 +1,108 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package propagation implements the binary trace context format. +package propagation // import "go.opencensus.io/trace/propagation" + +// TODO: link to external spec document. + +// BinaryFormat format: +// +// Binary value: +// version_id: 1 byte representing the version id. +// +// For version_id = 0: +// +// version_format: +// field_format: +// +// Fields: +// +// TraceId: (field_id = 0, len = 16, default = "0000000000000000") - 16-byte array representing the trace_id. +// SpanId: (field_id = 1, len = 8, default = "00000000") - 8-byte array representing the span_id. +// TraceOptions: (field_id = 2, len = 1, default = "0") - 1-byte array representing the trace_options. 
+// +// Fields MUST be encoded using the field id order (smaller to higher). +// +// Valid value example: +// +// {0, 0, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 1, 97, +// 98, 99, 100, 101, 102, 103, 104, 2, 1} +// +// version_id = 0; +// trace_id = {64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79} +// span_id = {97, 98, 99, 100, 101, 102, 103, 104}; +// trace_options = {1}; + +import ( + "net/http" + + "go.opencensus.io/trace" +) + +// Binary returns the binary format representation of a SpanContext. +// +// If sc is the zero value, Binary returns nil. +func Binary(sc trace.SpanContext) []byte { + if sc == (trace.SpanContext{}) { + return nil + } + var b [29]byte + copy(b[2:18], sc.TraceID[:]) + b[18] = 1 + copy(b[19:27], sc.SpanID[:]) + b[27] = 2 + b[28] = uint8(sc.TraceOptions) + return b[:] +} + +// FromBinary returns the SpanContext represented by b. +// +// If b has an unsupported version ID or contains no TraceID, FromBinary +// returns with ok==false. +func FromBinary(b []byte) (sc trace.SpanContext, ok bool) { + if len(b) == 0 || b[0] != 0 { + return trace.SpanContext{}, false + } + b = b[1:] + if len(b) >= 17 && b[0] == 0 { + copy(sc.TraceID[:], b[1:17]) + b = b[17:] + } else { + return trace.SpanContext{}, false + } + if len(b) >= 9 && b[0] == 1 { + copy(sc.SpanID[:], b[1:9]) + b = b[9:] + } + if len(b) >= 2 && b[0] == 2 { + sc.TraceOptions = trace.TraceOptions(b[1]) + } + return sc, true +} + +// HTTPFormat implementations propagate span contexts +// in HTTP requests. +// +// SpanContextFromRequest extracts a span context from incoming +// requests. +// +// SpanContextToRequest modifies the given request to include the given +// span context. +type HTTPFormat interface { + SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) + SpanContextToRequest(sc trace.SpanContext, req *http.Request) +} + +// TODO(jbd): Find a more representative but short name for HTTPFormat. 
diff --git a/vendor/go.opencensus.io/trace/sampling.go b/vendor/go.opencensus.io/trace/sampling.go new file mode 100644 index 000000000..71c10f9e3 --- /dev/null +++ b/vendor/go.opencensus.io/trace/sampling.go @@ -0,0 +1,75 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "encoding/binary" +) + +const defaultSamplingProbability = 1e-4 + +// Sampler decides whether a trace should be sampled and exported. +type Sampler func(SamplingParameters) SamplingDecision + +// SamplingParameters contains the values passed to a Sampler. +type SamplingParameters struct { + ParentContext SpanContext + TraceID TraceID + SpanID SpanID + Name string + HasRemoteParent bool +} + +// SamplingDecision is the value returned by a Sampler. +type SamplingDecision struct { + Sample bool +} + +// ProbabilitySampler returns a Sampler that samples a given fraction of traces. +// +// It also samples spans whose parents are sampled. 
+func ProbabilitySampler(fraction float64) Sampler { + if !(fraction >= 0) { + fraction = 0 + } else if fraction >= 1 { + return AlwaysSample() + } + + traceIDUpperBound := uint64(fraction * (1 << 63)) + return Sampler(func(p SamplingParameters) SamplingDecision { + if p.ParentContext.IsSampled() { + return SamplingDecision{Sample: true} + } + x := binary.BigEndian.Uint64(p.TraceID[0:8]) >> 1 + return SamplingDecision{Sample: x < traceIDUpperBound} + }) +} + +// AlwaysSample returns a Sampler that samples every trace. +// Be careful about using this sampler in a production application with +// significant traffic: a new trace will be started and exported for every +// request. +func AlwaysSample() Sampler { + return func(p SamplingParameters) SamplingDecision { + return SamplingDecision{Sample: true} + } +} + +// NeverSample returns a Sampler that samples no traces. +func NeverSample() Sampler { + return func(p SamplingParameters) SamplingDecision { + return SamplingDecision{Sample: false} + } +} diff --git a/vendor/go.opencensus.io/trace/spanbucket.go b/vendor/go.opencensus.io/trace/spanbucket.go new file mode 100644 index 000000000..fbabad34c --- /dev/null +++ b/vendor/go.opencensus.io/trace/spanbucket.go @@ -0,0 +1,130 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "time" +) + +// samplePeriod is the minimum time between accepting spans in a single bucket. 
+const samplePeriod = time.Second + +// defaultLatencies contains the default latency bucket bounds. +// TODO: consider defaults, make configurable +var defaultLatencies = [...]time.Duration{ + 10 * time.Microsecond, + 100 * time.Microsecond, + time.Millisecond, + 10 * time.Millisecond, + 100 * time.Millisecond, + time.Second, + 10 * time.Second, + time.Minute, +} + +// bucket is a container for a set of spans for a particular error code or latency range. +type bucket struct { + nextTime time.Time // next time we can accept a span + buffer []*SpanData // circular buffer of spans + nextIndex int // location next SpanData should be placed in buffer + overflow bool // whether the circular buffer has wrapped around +} + +func makeBucket(bufferSize int) bucket { + return bucket{ + buffer: make([]*SpanData, bufferSize), + } +} + +// add adds a span to the bucket, if nextTime has been reached. +func (b *bucket) add(s *SpanData) { + if s.EndTime.Before(b.nextTime) { + return + } + if len(b.buffer) == 0 { + return + } + b.nextTime = s.EndTime.Add(samplePeriod) + b.buffer[b.nextIndex] = s + b.nextIndex++ + if b.nextIndex == len(b.buffer) { + b.nextIndex = 0 + b.overflow = true + } +} + +// size returns the number of spans in the bucket. +func (b *bucket) size() int { + if b.overflow { + return len(b.buffer) + } + return b.nextIndex +} + +// span returns the ith span in the bucket. +func (b *bucket) span(i int) *SpanData { + if !b.overflow { + return b.buffer[i] + } + if i < len(b.buffer)-b.nextIndex { + return b.buffer[b.nextIndex+i] + } + return b.buffer[b.nextIndex+i-len(b.buffer)] +} + +// resize changes the size of the bucket to n, keeping up to n existing spans. 
+func (b *bucket) resize(n int) { + cur := b.size() + newBuffer := make([]*SpanData, n) + if cur < n { + for i := 0; i < cur; i++ { + newBuffer[i] = b.span(i) + } + b.buffer = newBuffer + b.nextIndex = cur + b.overflow = false + return + } + for i := 0; i < n; i++ { + newBuffer[i] = b.span(i + cur - n) + } + b.buffer = newBuffer + b.nextIndex = 0 + b.overflow = true +} + +// latencyBucket returns the appropriate bucket number for a given latency. +func latencyBucket(latency time.Duration) int { + i := 0 + for i < len(defaultLatencies) && latency >= defaultLatencies[i] { + i++ + } + return i +} + +// latencyBucketBounds returns the lower and upper bounds for a latency bucket +// number. +// +// The lower bound is inclusive, the upper bound is exclusive (except for the +// last bucket.) +func latencyBucketBounds(index int) (lower time.Duration, upper time.Duration) { + if index == 0 { + return 0, defaultLatencies[index] + } + if index == len(defaultLatencies) { + return defaultLatencies[index-1], 1<<63 - 1 + } + return defaultLatencies[index-1], defaultLatencies[index] +} diff --git a/vendor/go.opencensus.io/trace/spanstore.go b/vendor/go.opencensus.io/trace/spanstore.go new file mode 100644 index 000000000..c442d9902 --- /dev/null +++ b/vendor/go.opencensus.io/trace/spanstore.go @@ -0,0 +1,306 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package trace + +import ( + "sync" + "time" + + "go.opencensus.io/internal" +) + +const ( + maxBucketSize = 100000 + defaultBucketSize = 10 +) + +var ( + ssmu sync.RWMutex // protects spanStores + spanStores = make(map[string]*spanStore) +) + +// This exists purely to avoid exposing internal methods used by z-Pages externally. +type internalOnly struct{} + +func init() { + //TODO(#412): remove + internal.Trace = &internalOnly{} +} + +// ReportActiveSpans returns the active spans for the given name. +func (i internalOnly) ReportActiveSpans(name string) []*SpanData { + s := spanStoreForName(name) + if s == nil { + return nil + } + var out []*SpanData + s.mu.Lock() + defer s.mu.Unlock() + for span := range s.active { + out = append(out, span.makeSpanData()) + } + return out +} + +// ReportSpansByError returns a sample of error spans. +// +// If code is nonzero, only spans with that status code are returned. +func (i internalOnly) ReportSpansByError(name string, code int32) []*SpanData { + s := spanStoreForName(name) + if s == nil { + return nil + } + var out []*SpanData + s.mu.Lock() + defer s.mu.Unlock() + if code != 0 { + if b, ok := s.errors[code]; ok { + for _, sd := range b.buffer { + if sd == nil { + break + } + out = append(out, sd) + } + } + } else { + for _, b := range s.errors { + for _, sd := range b.buffer { + if sd == nil { + break + } + out = append(out, sd) + } + } + } + return out +} + +// ConfigureBucketSizes sets the number of spans to keep per latency and error +// bucket for different span names. 
+func (i internalOnly) ConfigureBucketSizes(bcs []internal.BucketConfiguration) { + for _, bc := range bcs { + latencyBucketSize := bc.MaxRequestsSucceeded + if latencyBucketSize < 0 { + latencyBucketSize = 0 + } + if latencyBucketSize > maxBucketSize { + latencyBucketSize = maxBucketSize + } + errorBucketSize := bc.MaxRequestsErrors + if errorBucketSize < 0 { + errorBucketSize = 0 + } + if errorBucketSize > maxBucketSize { + errorBucketSize = maxBucketSize + } + spanStoreSetSize(bc.Name, latencyBucketSize, errorBucketSize) + } +} + +// ReportSpansPerMethod returns a summary of what spans are being stored for each span name. +func (i internalOnly) ReportSpansPerMethod() map[string]internal.PerMethodSummary { + out := make(map[string]internal.PerMethodSummary) + ssmu.RLock() + defer ssmu.RUnlock() + for name, s := range spanStores { + s.mu.Lock() + p := internal.PerMethodSummary{ + Active: len(s.active), + } + for code, b := range s.errors { + p.ErrorBuckets = append(p.ErrorBuckets, internal.ErrorBucketSummary{ + ErrorCode: code, + Size: b.size(), + }) + } + for i, b := range s.latency { + min, max := latencyBucketBounds(i) + p.LatencyBuckets = append(p.LatencyBuckets, internal.LatencyBucketSummary{ + MinLatency: min, + MaxLatency: max, + Size: b.size(), + }) + } + s.mu.Unlock() + out[name] = p + } + return out +} + +// ReportSpansByLatency returns a sample of successful spans. +// +// minLatency is the minimum latency of spans to be returned. +// maxLatency, if nonzero, is the maximum latency of spans to be returned. 
+func (i internalOnly) ReportSpansByLatency(name string, minLatency, maxLatency time.Duration) []*SpanData { + s := spanStoreForName(name) + if s == nil { + return nil + } + var out []*SpanData + s.mu.Lock() + defer s.mu.Unlock() + for i, b := range s.latency { + min, max := latencyBucketBounds(i) + if i+1 != len(s.latency) && max <= minLatency { + continue + } + if maxLatency != 0 && maxLatency < min { + continue + } + for _, sd := range b.buffer { + if sd == nil { + break + } + if minLatency != 0 || maxLatency != 0 { + d := sd.EndTime.Sub(sd.StartTime) + if d < minLatency { + continue + } + if maxLatency != 0 && d > maxLatency { + continue + } + } + out = append(out, sd) + } + } + return out +} + +// spanStore keeps track of spans stored for a particular span name. +// +// It contains all active spans; a sample of spans for failed requests, +// categorized by error code; and a sample of spans for successful requests, +// bucketed by latency. +type spanStore struct { + mu sync.Mutex // protects everything below. + active map[*Span]struct{} + errors map[int32]*bucket + latency []bucket + maxSpansPerErrorBucket int +} + +// newSpanStore creates a span store. +func newSpanStore(name string, latencyBucketSize int, errorBucketSize int) *spanStore { + s := &spanStore{ + active: make(map[*Span]struct{}), + latency: make([]bucket, len(defaultLatencies)+1), + maxSpansPerErrorBucket: errorBucketSize, + } + for i := range s.latency { + s.latency[i] = makeBucket(latencyBucketSize) + } + return s +} + +// spanStoreForName returns the spanStore for the given name. +// +// It returns nil if it doesn't exist. +func spanStoreForName(name string) *spanStore { + var s *spanStore + ssmu.RLock() + s, _ = spanStores[name] + ssmu.RUnlock() + return s +} + +// spanStoreForNameCreateIfNew returns the spanStore for the given name. +// +// It creates it if it didn't exist. 
+func spanStoreForNameCreateIfNew(name string) *spanStore { + ssmu.RLock() + s, ok := spanStores[name] + ssmu.RUnlock() + if ok { + return s + } + ssmu.Lock() + defer ssmu.Unlock() + s, ok = spanStores[name] + if ok { + return s + } + s = newSpanStore(name, defaultBucketSize, defaultBucketSize) + spanStores[name] = s + return s +} + +// spanStoreSetSize resizes the spanStore for the given name. +// +// It creates it if it didn't exist. +func spanStoreSetSize(name string, latencyBucketSize int, errorBucketSize int) { + ssmu.RLock() + s, ok := spanStores[name] + ssmu.RUnlock() + if ok { + s.resize(latencyBucketSize, errorBucketSize) + return + } + ssmu.Lock() + defer ssmu.Unlock() + s, ok = spanStores[name] + if ok { + s.resize(latencyBucketSize, errorBucketSize) + return + } + s = newSpanStore(name, latencyBucketSize, errorBucketSize) + spanStores[name] = s +} + +func (s *spanStore) resize(latencyBucketSize int, errorBucketSize int) { + s.mu.Lock() + for i := range s.latency { + s.latency[i].resize(latencyBucketSize) + } + for _, b := range s.errors { + b.resize(errorBucketSize) + } + s.maxSpansPerErrorBucket = errorBucketSize + s.mu.Unlock() +} + +// add adds a span to the active bucket of the spanStore. +func (s *spanStore) add(span *Span) { + s.mu.Lock() + s.active[span] = struct{}{} + s.mu.Unlock() +} + +// finished removes a span from the active set, and adds a corresponding +// SpanData to a latency or error bucket. 
+func (s *spanStore) finished(span *Span, sd *SpanData) { + latency := sd.EndTime.Sub(sd.StartTime) + if latency < 0 { + latency = 0 + } + code := sd.Status.Code + + s.mu.Lock() + delete(s.active, span) + if code == 0 { + s.latency[latencyBucket(latency)].add(sd) + } else { + if s.errors == nil { + s.errors = make(map[int32]*bucket) + } + if b := s.errors[code]; b != nil { + b.add(sd) + } else { + b := makeBucket(s.maxSpansPerErrorBucket) + s.errors[code] = &b + b.add(sd) + } + } + s.mu.Unlock() +} diff --git a/vendor/go.opencensus.io/trace/status_codes.go b/vendor/go.opencensus.io/trace/status_codes.go new file mode 100644 index 000000000..ec60effd1 --- /dev/null +++ b/vendor/go.opencensus.io/trace/status_codes.go @@ -0,0 +1,37 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +// Status codes for use with Span.SetStatus. 
These correspond to the status +// codes used by gRPC defined here: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto +const ( + StatusCodeOK = 0 + StatusCodeCancelled = 1 + StatusCodeUnknown = 2 + StatusCodeInvalidArgument = 3 + StatusCodeDeadlineExceeded = 4 + StatusCodeNotFound = 5 + StatusCodeAlreadyExists = 6 + StatusCodePermissionDenied = 7 + StatusCodeResourceExhausted = 8 + StatusCodeFailedPrecondition = 9 + StatusCodeAborted = 10 + StatusCodeOutOfRange = 11 + StatusCodeUnimplemented = 12 + StatusCodeInternal = 13 + StatusCodeUnavailable = 14 + StatusCodeDataLoss = 15 + StatusCodeUnauthenticated = 16 +) diff --git a/vendor/go.opencensus.io/trace/trace.go b/vendor/go.opencensus.io/trace/trace.go new file mode 100644 index 000000000..38ead7bf0 --- /dev/null +++ b/vendor/go.opencensus.io/trace/trace.go @@ -0,0 +1,598 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "context" + crand "crypto/rand" + "encoding/binary" + "fmt" + "math/rand" + "sync" + "sync/atomic" + "time" + + "go.opencensus.io/internal" + "go.opencensus.io/trace/tracestate" +) + +// Span represents a span of a trace. It has an associated SpanContext, and +// stores data accumulated while the span is active. +// +// Ideally users should interact with Spans by calling the functions in this +// package that take a Context parameter. 
+type Span struct { + // data contains information recorded about the span. + // + // It will be non-nil if we are exporting the span or recording events for it. + // Otherwise, data is nil, and the Span is simply a carrier for the + // SpanContext, so that the trace ID is propagated. + data *SpanData + mu sync.Mutex // protects the contents of *data (but not the pointer value.) + spanContext SpanContext + + // lruAttributes are capped at configured limit. When the capacity is reached an oldest entry + // is removed to create room for a new entry. + lruAttributes *lruMap + + // annotations are stored in FIFO queue capped by configured limit. + annotations *evictedQueue + + // messageEvents are stored in FIFO queue capped by configured limit. + messageEvents *evictedQueue + + // links are stored in FIFO queue capped by configured limit. + links *evictedQueue + + // spanStore is the spanStore this span belongs to, if any, otherwise it is nil. + *spanStore + endOnce sync.Once + + executionTracerTaskEnd func() // ends the execution tracer span +} + +// IsRecordingEvents returns true if events are being recorded for this span. +// Use this check to avoid computing expensive annotations when they will never +// be used. +func (s *Span) IsRecordingEvents() bool { + if s == nil { + return false + } + return s.data != nil +} + +// TraceOptions contains options associated with a trace span. +type TraceOptions uint32 + +// IsSampled returns true if the span will be exported. +func (sc SpanContext) IsSampled() bool { + return sc.TraceOptions.IsSampled() +} + +// setIsSampled sets the TraceOptions bit that determines whether the span will be exported. +func (sc *SpanContext) setIsSampled(sampled bool) { + if sampled { + sc.TraceOptions |= 1 + } else { + sc.TraceOptions &= ^TraceOptions(1) + } +} + +// IsSampled returns true if the span will be exported. 
+func (t TraceOptions) IsSampled() bool { + return t&1 == 1 +} + +// SpanContext contains the state that must propagate across process boundaries. +// +// SpanContext is not an implementation of context.Context. +// TODO: add reference to external Census docs for SpanContext. +type SpanContext struct { + TraceID TraceID + SpanID SpanID + TraceOptions TraceOptions + Tracestate *tracestate.Tracestate +} + +type contextKey struct{} + +// FromContext returns the Span stored in a context, or nil if there isn't one. +func FromContext(ctx context.Context) *Span { + s, _ := ctx.Value(contextKey{}).(*Span) + return s +} + +// NewContext returns a new context with the given Span attached. +func NewContext(parent context.Context, s *Span) context.Context { + return context.WithValue(parent, contextKey{}, s) +} + +// All available span kinds. Span kind must be either one of these values. +const ( + SpanKindUnspecified = iota + SpanKindServer + SpanKindClient +) + +// StartOptions contains options concerning how a span is started. +type StartOptions struct { + // Sampler to consult for this Span. If provided, it is always consulted. + // + // If not provided, then the behavior differs based on whether + // the parent of this Span is remote, local, or there is no parent. + // In the case of a remote parent or no parent, the + // default sampler (see Config) will be consulted. Otherwise, + // when there is a non-remote parent, no new sampling decision will be made: + // we will preserve the sampling of the parent. + Sampler Sampler + + // SpanKind represents the kind of a span. If none is set, + // SpanKindUnspecified is used. + SpanKind int +} + +// StartOption apply changes to StartOptions. +type StartOption func(*StartOptions) + +// WithSpanKind makes new spans to be created with the given kind. +func WithSpanKind(spanKind int) StartOption { + return func(o *StartOptions) { + o.SpanKind = spanKind + } +} + +// WithSampler makes new spans to be be created with a custom sampler. 
+// Otherwise, the global sampler is used. +func WithSampler(sampler Sampler) StartOption { + return func(o *StartOptions) { + o.Sampler = sampler + } +} + +// StartSpan starts a new child span of the current span in the context. If +// there is no span in the context, creates a new trace and span. +// +// Returned context contains the newly created span. You can use it to +// propagate the returned span in process. +func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) { + var opts StartOptions + var parent SpanContext + if p := FromContext(ctx); p != nil { + p.addChild() + parent = p.spanContext + } + for _, op := range o { + op(&opts) + } + span := startSpanInternal(name, parent != SpanContext{}, parent, false, opts) + + ctx, end := startExecutionTracerTask(ctx, name) + span.executionTracerTaskEnd = end + return NewContext(ctx, span), span +} + +// StartSpanWithRemoteParent starts a new child span of the span from the given parent. +// +// If the incoming context contains a parent, it ignores. StartSpanWithRemoteParent is +// preferred for cases where the parent is propagated via an incoming request. +// +// Returned context contains the newly created span. You can use it to +// propagate the returned span in process. 
+func StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) { + var opts StartOptions + for _, op := range o { + op(&opts) + } + span := startSpanInternal(name, parent != SpanContext{}, parent, true, opts) + ctx, end := startExecutionTracerTask(ctx, name) + span.executionTracerTaskEnd = end + return NewContext(ctx, span), span +} + +func startSpanInternal(name string, hasParent bool, parent SpanContext, remoteParent bool, o StartOptions) *Span { + span := &Span{} + span.spanContext = parent + + cfg := config.Load().(*Config) + + if !hasParent { + span.spanContext.TraceID = cfg.IDGenerator.NewTraceID() + } + span.spanContext.SpanID = cfg.IDGenerator.NewSpanID() + sampler := cfg.DefaultSampler + + if !hasParent || remoteParent || o.Sampler != nil { + // If this span is the child of a local span and no Sampler is set in the + // options, keep the parent's TraceOptions. + // + // Otherwise, consult the Sampler in the options if it is non-nil, otherwise + // the default sampler. 
+ if o.Sampler != nil { + sampler = o.Sampler + } + span.spanContext.setIsSampled(sampler(SamplingParameters{ + ParentContext: parent, + TraceID: span.spanContext.TraceID, + SpanID: span.spanContext.SpanID, + Name: name, + HasRemoteParent: remoteParent}).Sample) + } + + if !internal.LocalSpanStoreEnabled && !span.spanContext.IsSampled() { + return span + } + + span.data = &SpanData{ + SpanContext: span.spanContext, + StartTime: time.Now(), + SpanKind: o.SpanKind, + Name: name, + HasRemoteParent: remoteParent, + } + span.lruAttributes = newLruMap(cfg.MaxAttributesPerSpan) + span.annotations = newEvictedQueue(cfg.MaxAnnotationEventsPerSpan) + span.messageEvents = newEvictedQueue(cfg.MaxMessageEventsPerSpan) + span.links = newEvictedQueue(cfg.MaxLinksPerSpan) + + if hasParent { + span.data.ParentSpanID = parent.SpanID + } + if internal.LocalSpanStoreEnabled { + var ss *spanStore + ss = spanStoreForNameCreateIfNew(name) + if ss != nil { + span.spanStore = ss + ss.add(span) + } + } + + return span +} + +// End ends the span. +func (s *Span) End() { + if s == nil { + return + } + if s.executionTracerTaskEnd != nil { + s.executionTracerTaskEnd() + } + if !s.IsRecordingEvents() { + return + } + s.endOnce.Do(func() { + exp, _ := exporters.Load().(exportersMap) + mustExport := s.spanContext.IsSampled() && len(exp) > 0 + if s.spanStore != nil || mustExport { + sd := s.makeSpanData() + sd.EndTime = internal.MonotonicEndTime(sd.StartTime) + if s.spanStore != nil { + s.spanStore.finished(s, sd) + } + if mustExport { + for e := range exp { + e.ExportSpan(sd) + } + } + } + }) +} + +// makeSpanData produces a SpanData representing the current state of the Span. +// It requires that s.data is non-nil. 
+func (s *Span) makeSpanData() *SpanData { + var sd SpanData + s.mu.Lock() + sd = *s.data + if s.lruAttributes.simpleLruMap.Len() > 0 { + sd.Attributes = s.lruAttributesToAttributeMap() + sd.DroppedAttributeCount = s.lruAttributes.droppedCount + } + if len(s.annotations.queue) > 0 { + sd.Annotations = s.interfaceArrayToAnnotationArray() + sd.DroppedAnnotationCount = s.annotations.droppedCount + } + if len(s.messageEvents.queue) > 0 { + sd.MessageEvents = s.interfaceArrayToMessageEventArray() + sd.DroppedMessageEventCount = s.messageEvents.droppedCount + } + if len(s.links.queue) > 0 { + sd.Links = s.interfaceArrayToLinksArray() + sd.DroppedLinkCount = s.links.droppedCount + } + s.mu.Unlock() + return &sd +} + +// SpanContext returns the SpanContext of the span. +func (s *Span) SpanContext() SpanContext { + if s == nil { + return SpanContext{} + } + return s.spanContext +} + +// SetName sets the name of the span, if it is recording events. +func (s *Span) SetName(name string) { + if !s.IsRecordingEvents() { + return + } + s.mu.Lock() + s.data.Name = name + s.mu.Unlock() +} + +// SetStatus sets the status of the span, if it is recording events. 
+func (s *Span) SetStatus(status Status) { + if !s.IsRecordingEvents() { + return + } + s.mu.Lock() + s.data.Status = status + s.mu.Unlock() +} + +func (s *Span) interfaceArrayToLinksArray() []Link { + linksArr := make([]Link, 0) + for _, value := range s.links.queue { + linksArr = append(linksArr, value.(Link)) + } + return linksArr +} + +func (s *Span) interfaceArrayToMessageEventArray() []MessageEvent { + messageEventArr := make([]MessageEvent, 0) + for _, value := range s.messageEvents.queue { + messageEventArr = append(messageEventArr, value.(MessageEvent)) + } + return messageEventArr +} + +func (s *Span) interfaceArrayToAnnotationArray() []Annotation { + annotationArr := make([]Annotation, 0) + for _, value := range s.annotations.queue { + annotationArr = append(annotationArr, value.(Annotation)) + } + return annotationArr +} + +func (s *Span) lruAttributesToAttributeMap() map[string]interface{} { + attributes := make(map[string]interface{}) + for _, key := range s.lruAttributes.simpleLruMap.Keys() { + value, ok := s.lruAttributes.simpleLruMap.Get(key) + if ok { + keyStr := key.(string) + attributes[keyStr] = value + } + } + return attributes +} + +func (s *Span) copyToCappedAttributes(attributes []Attribute) { + for _, a := range attributes { + s.lruAttributes.add(a.key, a.value) + } +} + +func (s *Span) addChild() { + if !s.IsRecordingEvents() { + return + } + s.mu.Lock() + s.data.ChildSpanCount++ + s.mu.Unlock() +} + +// AddAttributes sets attributes in the span. +// +// Existing attributes whose keys appear in the attributes parameter are overwritten. +func (s *Span) AddAttributes(attributes ...Attribute) { + if !s.IsRecordingEvents() { + return + } + s.mu.Lock() + s.copyToCappedAttributes(attributes) + s.mu.Unlock() +} + +// copyAttributes copies a slice of Attributes into a map. 
+func copyAttributes(m map[string]interface{}, attributes []Attribute) { + for _, a := range attributes { + m[a.key] = a.value + } +} + +func (s *Span) lazyPrintfInternal(attributes []Attribute, format string, a ...interface{}) { + now := time.Now() + msg := fmt.Sprintf(format, a...) + var m map[string]interface{} + s.mu.Lock() + if len(attributes) != 0 { + m = make(map[string]interface{}) + copyAttributes(m, attributes) + } + s.annotations.add(Annotation{ + Time: now, + Message: msg, + Attributes: m, + }) + s.mu.Unlock() +} + +func (s *Span) printStringInternal(attributes []Attribute, str string) { + now := time.Now() + var a map[string]interface{} + s.mu.Lock() + if len(attributes) != 0 { + a = make(map[string]interface{}) + copyAttributes(a, attributes) + } + s.annotations.add(Annotation{ + Time: now, + Message: str, + Attributes: a, + }) + s.mu.Unlock() +} + +// Annotate adds an annotation with attributes. +// Attributes can be nil. +func (s *Span) Annotate(attributes []Attribute, str string) { + if !s.IsRecordingEvents() { + return + } + s.printStringInternal(attributes, str) +} + +// Annotatef adds an annotation with attributes. +func (s *Span) Annotatef(attributes []Attribute, format string, a ...interface{}) { + if !s.IsRecordingEvents() { + return + } + s.lazyPrintfInternal(attributes, format, a...) +} + +// AddMessageSendEvent adds a message send event to the span. +// +// messageID is an identifier for the message, which is recommended to be +// unique in this span and the same between the send event and the receive +// event (this allows to identify a message between the sender and receiver). +// For example, this could be a sequence id. 
+func (s *Span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64) { + if !s.IsRecordingEvents() { + return + } + now := time.Now() + s.mu.Lock() + s.messageEvents.add(MessageEvent{ + Time: now, + EventType: MessageEventTypeSent, + MessageID: messageID, + UncompressedByteSize: uncompressedByteSize, + CompressedByteSize: compressedByteSize, + }) + s.mu.Unlock() +} + +// AddMessageReceiveEvent adds a message receive event to the span. +// +// messageID is an identifier for the message, which is recommended to be +// unique in this span and the same between the send event and the receive +// event (this allows to identify a message between the sender and receiver). +// For example, this could be a sequence id. +func (s *Span) AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64) { + if !s.IsRecordingEvents() { + return + } + now := time.Now() + s.mu.Lock() + s.messageEvents.add(MessageEvent{ + Time: now, + EventType: MessageEventTypeRecv, + MessageID: messageID, + UncompressedByteSize: uncompressedByteSize, + CompressedByteSize: compressedByteSize, + }) + s.mu.Unlock() +} + +// AddLink adds a link to the span. +func (s *Span) AddLink(l Link) { + if !s.IsRecordingEvents() { + return + } + s.mu.Lock() + s.links.add(l) + s.mu.Unlock() +} + +func (s *Span) String() string { + if s == nil { + return "" + } + if s.data == nil { + return fmt.Sprintf("span %s", s.spanContext.SpanID) + } + s.mu.Lock() + str := fmt.Sprintf("span %s %q", s.spanContext.SpanID, s.data.Name) + s.mu.Unlock() + return str +} + +var config atomic.Value // access atomically + +func init() { + gen := &defaultIDGenerator{} + // initialize traceID and spanID generators. 
+ var rngSeed int64 + for _, p := range []interface{}{ + &rngSeed, &gen.traceIDAdd, &gen.nextSpanID, &gen.spanIDInc, + } { + binary.Read(crand.Reader, binary.LittleEndian, p) + } + gen.traceIDRand = rand.New(rand.NewSource(rngSeed)) + gen.spanIDInc |= 1 + + config.Store(&Config{ + DefaultSampler: ProbabilitySampler(defaultSamplingProbability), + IDGenerator: gen, + MaxAttributesPerSpan: DefaultMaxAttributesPerSpan, + MaxAnnotationEventsPerSpan: DefaultMaxAnnotationEventsPerSpan, + MaxMessageEventsPerSpan: DefaultMaxMessageEventsPerSpan, + MaxLinksPerSpan: DefaultMaxLinksPerSpan, + }) +} + +type defaultIDGenerator struct { + sync.Mutex + + // Please keep these as the first fields + // so that these 8 byte fields will be aligned on addresses + // divisible by 8, on both 32-bit and 64-bit machines when + // performing atomic increments and accesses. + // See: + // * https://github.com/census-instrumentation/opencensus-go/issues/587 + // * https://github.com/census-instrumentation/opencensus-go/issues/865 + // * https://golang.org/pkg/sync/atomic/#pkg-note-BUG + nextSpanID uint64 + spanIDInc uint64 + + traceIDAdd [2]uint64 + traceIDRand *rand.Rand +} + +// NewSpanID returns a non-zero span ID from a randomly-chosen sequence. +func (gen *defaultIDGenerator) NewSpanID() [8]byte { + var id uint64 + for id == 0 { + id = atomic.AddUint64(&gen.nextSpanID, gen.spanIDInc) + } + var sid [8]byte + binary.LittleEndian.PutUint64(sid[:], id) + return sid +} + +// NewTraceID returns a non-zero trace ID from a randomly-chosen sequence. +// mu should be held while this function is called. +func (gen *defaultIDGenerator) NewTraceID() [16]byte { + var tid [16]byte + // Construct the trace ID from two outputs of traceIDRand, with a constant + // added to each half for additional entropy. 
+ gen.Lock() + binary.LittleEndian.PutUint64(tid[0:8], gen.traceIDRand.Uint64()+gen.traceIDAdd[0]) + binary.LittleEndian.PutUint64(tid[8:16], gen.traceIDRand.Uint64()+gen.traceIDAdd[1]) + gen.Unlock() + return tid +} diff --git a/vendor/go.opencensus.io/trace/trace_go11.go b/vendor/go.opencensus.io/trace/trace_go11.go new file mode 100644 index 000000000..b7d8aaf28 --- /dev/null +++ b/vendor/go.opencensus.io/trace/trace_go11.go @@ -0,0 +1,32 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.11 + +package trace + +import ( + "context" + t "runtime/trace" +) + +func startExecutionTracerTask(ctx context.Context, name string) (context.Context, func()) { + if !t.IsEnabled() { + // Avoid additional overhead if + // runtime/trace is not enabled. + return ctx, func() {} + } + nctx, task := t.NewTask(ctx, name) + return nctx, task.End +} diff --git a/vendor/go.opencensus.io/trace/trace_nongo11.go b/vendor/go.opencensus.io/trace/trace_nongo11.go new file mode 100644 index 000000000..e25419859 --- /dev/null +++ b/vendor/go.opencensus.io/trace/trace_nongo11.go @@ -0,0 +1,25 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !go1.11 + +package trace + +import ( + "context" +) + +func startExecutionTracerTask(ctx context.Context, name string) (context.Context, func()) { + return ctx, func() {} +} diff --git a/vendor/go.opencensus.io/trace/tracestate/tracestate.go b/vendor/go.opencensus.io/trace/tracestate/tracestate.go new file mode 100644 index 000000000..2d6c713eb --- /dev/null +++ b/vendor/go.opencensus.io/trace/tracestate/tracestate.go @@ -0,0 +1,147 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package tracestate implements support for the Tracestate header of the +// W3C TraceContext propagation format. 
+package tracestate + +import ( + "fmt" + "regexp" +) + +const ( + keyMaxSize = 256 + valueMaxSize = 256 + maxKeyValuePairs = 32 +) + +const ( + keyWithoutVendorFormat = `[a-z][_0-9a-z\-\*\/]{0,255}` + keyWithVendorFormat = `[a-z][_0-9a-z\-\*\/]{0,240}@[a-z][_0-9a-z\-\*\/]{0,13}` + keyFormat = `(` + keyWithoutVendorFormat + `)|(` + keyWithVendorFormat + `)` + valueFormat = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]{0,255}[\x21-\x2b\x2d-\x3c\x3e-\x7e]` +) + +var keyValidationRegExp = regexp.MustCompile(`^(` + keyFormat + `)$`) +var valueValidationRegExp = regexp.MustCompile(`^(` + valueFormat + `)$`) + +// Tracestate represents tracing-system specific context in a list of key-value pairs. Tracestate allows different +// vendors propagate additional information and inter-operate with their legacy Id formats. +type Tracestate struct { + entries []Entry +} + +// Entry represents one key-value pair in a list of key-value pair of Tracestate. +type Entry struct { + // Key is an opaque string up to 256 characters printable. It MUST begin with a lowercase letter, + // and can only contain lowercase letters a-z, digits 0-9, underscores _, dashes -, asterisks *, and + // forward slashes /. + Key string + + // Value is an opaque string up to 256 characters printable ASCII RFC0020 characters (i.e., the + // range 0x20 to 0x7E) except comma , and =. + Value string +} + +// Entries returns a slice of Entry. +func (ts *Tracestate) Entries() []Entry { + if ts == nil { + return nil + } + return ts.entries +} + +func (ts *Tracestate) remove(key string) *Entry { + for index, entry := range ts.entries { + if entry.Key == key { + ts.entries = append(ts.entries[:index], ts.entries[index+1:]...) 
+ return &entry + } + } + return nil +} + +func (ts *Tracestate) add(entries []Entry) error { + for _, entry := range entries { + ts.remove(entry.Key) + } + if len(ts.entries)+len(entries) > maxKeyValuePairs { + return fmt.Errorf("adding %d key-value pairs to current %d pairs exceeds the limit of %d", + len(entries), len(ts.entries), maxKeyValuePairs) + } + ts.entries = append(entries, ts.entries...) + return nil +} + +func isValid(entry Entry) bool { + return keyValidationRegExp.MatchString(entry.Key) && + valueValidationRegExp.MatchString(entry.Value) +} + +func containsDuplicateKey(entries ...Entry) (string, bool) { + keyMap := make(map[string]int) + for _, entry := range entries { + if _, ok := keyMap[entry.Key]; ok { + return entry.Key, true + } + keyMap[entry.Key] = 1 + } + return "", false +} + +func areEntriesValid(entries ...Entry) (*Entry, bool) { + for _, entry := range entries { + if !isValid(entry) { + return &entry, false + } + } + return nil, true +} + +// New creates a Tracestate object from a parent and/or entries (key-value pair). +// Entries from the parent are copied if present. The entries passed to this function +// are inserted in front of those copied from the parent. If an entry copied from the +// parent contains the same key as one of the entry in entries then the entry copied +// from the parent is removed. See add func. +// +// An error is returned with nil Tracestate if +// 1. one or more entry in entries is invalid. +// 2. two or more entries in the input entries have the same key. +// 3. the number of entries combined from the parent and the input entries exceeds maxKeyValuePairs. +// (duplicate entry is counted only once). 
+func New(parent *Tracestate, entries ...Entry) (*Tracestate, error) { + if parent == nil && len(entries) == 0 { + return nil, nil + } + if entry, ok := areEntriesValid(entries...); !ok { + return nil, fmt.Errorf("key-value pair {%s, %s} is invalid", entry.Key, entry.Value) + } + + if key, duplicate := containsDuplicateKey(entries...); duplicate { + return nil, fmt.Errorf("contains duplicate keys (%s)", key) + } + + tracestate := Tracestate{} + + if parent != nil && len(parent.entries) > 0 { + tracestate.entries = append([]Entry{}, parent.entries...) + } + + err := tracestate.add(entries) + if err != nil { + return nil, err + } + return &tracestate, nil +} diff --git a/vendor/go.uber.org/ratelimit/LICENSE b/vendor/go.uber.org/ratelimit/LICENSE new file mode 100644 index 000000000..0f3edc861 --- /dev/null +++ b/vendor/go.uber.org/ratelimit/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Uber Technologies, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
\ No newline at end of file diff --git a/vendor/go.uber.org/ratelimit/internal/clock/clock.go b/vendor/go.uber.org/ratelimit/internal/clock/clock.go new file mode 100644 index 000000000..17569d886 --- /dev/null +++ b/vendor/go.uber.org/ratelimit/internal/clock/clock.go @@ -0,0 +1,133 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package clock + +// Forked from github.com/andres-erbsen/clock to isolate a missing nap. + +import ( + "container/heap" + "sync" + "time" +) + +// Mock represents a mock clock that only moves forward programmically. +// It can be preferable to a real-time clock when testing time-based functionality. +type Mock struct { + sync.Mutex + now time.Time // current time + timers Timers // timers +} + +// NewMock returns an instance of a mock clock. +// The current time of the mock clock on initialization is the Unix epoch. 
+func NewMock() *Mock { + return &Mock{now: time.Unix(0, 0)} +} + +// Add moves the current time of the mock clock forward by the duration. +// This should only be called from a single goroutine at a time. +func (m *Mock) Add(d time.Duration) { + m.Lock() + // Calculate the final time. + end := m.now.Add(d) + + for len(m.timers) > 0 && m.now.Before(end) { + t := heap.Pop(&m.timers).(*Timer) + m.now = t.next + m.Unlock() + t.Tick() + m.Lock() + } + + m.Unlock() + // Give a small buffer to make sure the other goroutines get handled. + nap() +} + +// Timer produces a timer that will emit a time some duration after now. +func (m *Mock) Timer(d time.Duration) *Timer { + ch := make(chan time.Time) + t := &Timer{ + C: ch, + c: ch, + mock: m, + next: m.now.Add(d), + } + m.addTimer(t) + return t +} + +func (m *Mock) addTimer(t *Timer) { + m.Lock() + defer m.Unlock() + heap.Push(&m.timers, t) +} + +// After produces a channel that will emit the time after a duration passes. +func (m *Mock) After(d time.Duration) <-chan time.Time { + return m.Timer(d).C +} + +// AfterFunc waits for the duration to elapse and then executes a function. +// A Timer is returned that can be stopped. +func (m *Mock) AfterFunc(d time.Duration, f func()) *Timer { + t := m.Timer(d) + go func() { + <-t.c + f() + }() + nap() + return t +} + +// Now returns the current wall time on the mock clock. +func (m *Mock) Now() time.Time { + m.Lock() + defer m.Unlock() + return m.now +} + +// Sleep pauses the goroutine for the given duration on the mock clock. +// The clock must be moved forward in a separate goroutine. +func (m *Mock) Sleep(d time.Duration) { + <-m.After(d) +} + +// Timer represents a single event. 
+type Timer struct { + C <-chan time.Time + c chan time.Time + next time.Time // next tick time + mock *Mock // mock clock +} + +func (t *Timer) Next() time.Time { return t.next } + +func (t *Timer) Tick() { + select { + case t.c <- t.next: + default: + } + nap() +} + +// Sleep momentarily so that other goroutines can process. +func nap() { time.Sleep(1 * time.Millisecond) } diff --git a/vendor/go.uber.org/ratelimit/internal/clock/interface.go b/vendor/go.uber.org/ratelimit/internal/clock/interface.go new file mode 100644 index 000000000..d380b3bb4 --- /dev/null +++ b/vendor/go.uber.org/ratelimit/internal/clock/interface.go @@ -0,0 +1,34 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package clock + +import "time" + +// Clock represents an interface to the functions in the standard library time +// package. Two implementations are available in the clock package. 
The first +// is a real-time clock which simply wraps the time package's functions. The +// second is a mock clock which will only make forward progress when +// programmatically adjusted. +type Clock interface { + AfterFunc(d time.Duration, f func()) + Now() time.Time + Sleep(d time.Duration) +} diff --git a/vendor/go.uber.org/ratelimit/internal/clock/real.go b/vendor/go.uber.org/ratelimit/internal/clock/real.go new file mode 100644 index 000000000..3a1be7e26 --- /dev/null +++ b/vendor/go.uber.org/ratelimit/internal/clock/real.go @@ -0,0 +1,42 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package clock + +import "time" + +// clock implements a real-time clock by simply wrapping the time package functions. +type clock struct{} + +// New returns an instance of a real-time clock. 
+func New() Clock { + return &clock{} +} + +func (c *clock) After(d time.Duration) <-chan time.Time { return time.After(d) } + +func (c *clock) AfterFunc(d time.Duration, f func()) { + // TODO maybe return timer interface + time.AfterFunc(d, f) +} + +func (c *clock) Now() time.Time { return time.Now() } + +func (c *clock) Sleep(d time.Duration) { time.Sleep(d) } diff --git a/vendor/go.uber.org/ratelimit/internal/clock/timers.go b/vendor/go.uber.org/ratelimit/internal/clock/timers.go new file mode 100644 index 000000000..577dd395d --- /dev/null +++ b/vendor/go.uber.org/ratelimit/internal/clock/timers.go @@ -0,0 +1,44 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package clock + +// timers represents a list of sortable timers. 
+type Timers []*Timer + +func (ts Timers) Len() int { return len(ts) } + +func (ts Timers) Swap(i, j int) { + ts[i], ts[j] = ts[j], ts[i] +} + +func (ts Timers) Less(i, j int) bool { + return ts[i].Next().Before(ts[j].Next()) +} + +func (ts *Timers) Push(t interface{}) { + *ts = append(*ts, t.(*Timer)) +} + +func (ts *Timers) Pop() interface{} { + t := (*ts)[len(*ts)-1] + *ts = (*ts)[:len(*ts)-1] + return t +} diff --git a/vendor/go.uber.org/ratelimit/ratelimit.go b/vendor/go.uber.org/ratelimit/ratelimit.go new file mode 100644 index 000000000..27698566d --- /dev/null +++ b/vendor/go.uber.org/ratelimit/ratelimit.go @@ -0,0 +1,140 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package ratelimit // import "go.uber.org/ratelimit" + +import ( + "sync" + "time" + + "go.uber.org/ratelimit/internal/clock" +) + +// Note: This file is inspired by: +// https://github.com/prashantv/go-bench/blob/master/ratelimit + +// Limiter is used to rate-limit some process, possibly across goroutines. +// The process is expected to call Take() before every iteration, which +// may block to throttle the goroutine. +type Limiter interface { + // Take should block to make sure that the RPS is met. + Take() time.Time +} + +// Clock is the minimum necessary interface to instantiate a rate limiter with +// a clock or mock clock, compatible with clocks created using +// github.com/andres-erbsen/clock. +type Clock interface { + Now() time.Time + Sleep(time.Duration) +} + +type limiter struct { + sync.Mutex + last time.Time + sleepFor time.Duration + perRequest time.Duration + maxSlack time.Duration + clock Clock +} + +// Option configures a Limiter. +type Option func(l *limiter) + +// New returns a Limiter that will limit to the given RPS. +func New(rate int, opts ...Option) Limiter { + l := &limiter{ + perRequest: time.Second / time.Duration(rate), + maxSlack: -10 * time.Second / time.Duration(rate), + } + for _, opt := range opts { + opt(l) + } + if l.clock == nil { + l.clock = clock.New() + } + return l +} + +// WithClock returns an option for ratelimit.New that provides an alternate +// Clock implementation, typically a mock Clock for testing. +func WithClock(clock Clock) Option { + return func(l *limiter) { + l.clock = clock + } +} + +// WithoutSlack is an option for ratelimit.New that initializes the limiter +// without any initial tolerance for bursts of traffic. +var WithoutSlack Option = withoutSlackOption + +func withoutSlackOption(l *limiter) { + l.maxSlack = 0 +} + +// Take blocks to ensure that the time spent between multiple +// Take calls is on average time.Second/rate. 
+func (t *limiter) Take() time.Time { + t.Lock() + defer t.Unlock() + + now := t.clock.Now() + + // If this is our first request, then we allow it. + if t.last.IsZero() { + t.last = now + return t.last + } + + // sleepFor calculates how much time we should sleep based on + // the perRequest budget and how long the last request took. + // Since the request may take longer than the budget, this number + // can get negative, and is summed across requests. + t.sleepFor += t.perRequest - now.Sub(t.last) + + // We shouldn't allow sleepFor to get too negative, since it would mean that + // a service that slowed down a lot for a short period of time would get + // a much higher RPS following that. + if t.sleepFor < t.maxSlack { + t.sleepFor = t.maxSlack + } + + // If sleepFor is positive, then we should sleep now. + if t.sleepFor > 0 { + t.clock.Sleep(t.sleepFor) + t.last = now.Add(t.sleepFor) + t.sleepFor = 0 + } else { + t.last = now + } + + return t.last +} + +type unlimited struct{} + +// NewUnlimited returns a RateLimiter that is not limited. +func NewUnlimited() Limiter { + return unlimited{} +} + +func (unlimited) Take() time.Time { + return time.Now() +} diff --git a/vendor/google.golang.org/api/dns/v1/dns-gen.go b/vendor/google.golang.org/api/dns/v1/dns-gen.go index 8ffc31664..f900962e8 100644 --- a/vendor/google.golang.org/api/dns/v1/dns-gen.go +++ b/vendor/google.golang.org/api/dns/v1/dns-gen.go @@ -1,28 +1,62 @@ +// Copyright 2019 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated file. DO NOT EDIT. + // Package dns provides access to the Google Cloud DNS API. // -// See https://developers.google.com/cloud-dns +// For product documentation, see: https://developers.google.com/cloud-dns +// +// Creating a client // // Usage example: // // import "google.golang.org/api/dns/v1" // ... 
-// dnsService, err := dns.New(oauthHttpClient) +// ctx := context.Background() +// dnsService, err := dns.NewService(ctx) +// +// In this example, Google Application Default Credentials are used for authentication. +// +// For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. +// +// Other authentication options +// +// By default, all available scopes (see "Constants") are used to authenticate. To restrict scopes, use option.WithScopes: +// +// dnsService, err := dns.NewService(ctx, option.WithScopes(dns.NdevClouddnsReadwriteScope)) +// +// To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey: +// +// dnsService, err := dns.NewService(ctx, option.WithAPIKey("AIza...")) +// +// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource: +// +// config := &oauth2.Config{...} +// // ... +// token, err := config.Exchange(ctx, ...) +// dnsService, err := dns.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) +// +// See https://godoc.org/google.golang.org/api/option/ for details on options. 
package dns // import "google.golang.org/api/dns/v1" import ( "bytes" + "context" "encoding/json" "errors" "fmt" - context "golang.org/x/net/context" - ctxhttp "golang.org/x/net/context/ctxhttp" - gensupport "google.golang.org/api/gensupport" - googleapi "google.golang.org/api/googleapi" "io" "net/http" "net/url" "strconv" "strings" + + gensupport "google.golang.org/api/gensupport" + googleapi "google.golang.org/api/googleapi" + option "google.golang.org/api/option" + htransport "google.golang.org/api/transport/http" ) // Always reference these packages, just in case the auto-generated code @@ -38,7 +72,6 @@ var _ = googleapi.Version var _ = errors.New var _ = strings.Replace var _ = context.Canceled -var _ = ctxhttp.Do const apiId = "dns:v1" const apiName = "dns" @@ -60,6 +93,35 @@ const ( NdevClouddnsReadwriteScope = "https://www.googleapis.com/auth/ndev.clouddns.readwrite" ) +// NewService creates a new Service. +func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, error) { + scopesOption := option.WithScopes( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/ndev.clouddns.readonly", + "https://www.googleapis.com/auth/ndev.clouddns.readwrite", + ) + // NOTE: prepend, so we don't override user-specified scopes. + opts = append([]option.ClientOption{scopesOption}, opts...) + client, endpoint, err := htransport.NewClient(ctx, opts...) + if err != nil { + return nil, err + } + s, err := New(client) + if err != nil { + return nil, err + } + if endpoint != "" { + s.BasePath = endpoint + } + return s, nil +} + +// New creates a new Service. It uses the provided http.Client for requests. +// +// Deprecated: please use NewService instead. +// To provide a custom HTTP client, use option.WithHTTPClient. +// If you are using google.golang.org/api/googleapis/transport.APIKey, use option.WithAPIKey with NewService instead. 
func New(client *http.Client) (*Service, error) { if client == nil { return nil, errors.New("client is nil") @@ -153,7 +215,13 @@ type ResourceRecordSetsService struct { s *Service } -// Change: An atomic update to a collection of ResourceRecordSets. +// Change: A Change represents a set of ResourceRecordSet additions and +// deletions applied atomically to a ManagedZone. ResourceRecordSets +// within a ManagedZone are modified by creating a new Change element in +// the Changes collection. In turn the Changes collection also records +// the past modifications to the ResourceRecordSets in a ManagedZone. +// The current state of the ManagedZone is the sum effect of applying +// all Change elements in the Changes collection in sequence. type Change struct { // Additions: Which ResourceRecordSets to add? Additions []*ResourceRecordSet `json:"additions,omitempty"` @@ -177,7 +245,9 @@ type Change struct { // (output only). This is in RFC3339 text format. StartTime string `json:"startTime,omitempty"` - // Status: Status of the operation (output only). + // Status: Status of the operation (output only). A status of "done" + // means that the request to update the authoritative servers has been + // sent, but the servers might not be updated yet. // // Possible values: // "done" @@ -411,11 +481,12 @@ type DnsKeySpec struct { // KeyLength: Length of the keys in bits. KeyLength int64 `json:"keyLength,omitempty"` - // KeyType: One of "KEY_SIGNING" or "ZONE_SIGNING". Keys of type - // KEY_SIGNING have the Secure Entry Point flag set and, when active, - // will be used to sign only resource record sets of type DNSKEY. - // Otherwise, the Secure Entry Point flag will be cleared and this key - // will be used to sign only resource record sets of other types. + // KeyType: Specifies whether this is a key signing key (KSK) or a zone + // signing key (ZSK). 
Key signing keys have the Secure Entry Point flag + // set and, when active, will only be used to sign resource record sets + // of type DNSKEY. Zone signing keys do not have the Secure Entry Point + // flag set and will be used to sign all other types of resource record + // sets. // // Possible values: // "keySigning" @@ -546,6 +617,19 @@ type ManagedZone struct { // servers; defined by the server (output only) NameServers []string `json:"nameServers,omitempty"` + // PrivateVisibilityConfig: For privately visible zones, the set of + // Virtual Private Cloud resources that the zone is visible from. + PrivateVisibilityConfig *ManagedZonePrivateVisibilityConfig `json:"privateVisibilityConfig,omitempty"` + + // Visibility: The zone's visibility: public zones are exposed to the + // Internet, while private zones are visible only to Virtual Private + // Cloud resources. + // + // Possible values: + // "private" + // "public" + Visibility string `json:"visibility,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -672,6 +756,70 @@ func (s *ManagedZoneOperationsListResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type ManagedZonePrivateVisibilityConfig struct { + // Kind: Identifies what kind of resource this is. Value: the fixed + // string "dns#managedZonePrivateVisibilityConfig". + Kind string `json:"kind,omitempty"` + + // Networks: The list of VPC networks that can see this zone. + Networks []*ManagedZonePrivateVisibilityConfigNetwork `json:"networks,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Kind") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Kind") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ManagedZonePrivateVisibilityConfig) MarshalJSON() ([]byte, error) { + type NoMethod ManagedZonePrivateVisibilityConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ManagedZonePrivateVisibilityConfigNetwork struct { + // Kind: Identifies what kind of resource this is. Value: the fixed + // string "dns#managedZonePrivateVisibilityConfigNetwork". + Kind string `json:"kind,omitempty"` + + // NetworkUrl: The fully qualified URL of the VPC network to bind to. + // This should be formatted like + // https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network} + NetworkUrl string `json:"networkUrl,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Kind") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Kind") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ManagedZonePrivateVisibilityConfigNetwork) MarshalJSON() ([]byte, error) { + type NoMethod ManagedZonePrivateVisibilityConfigNetwork + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type ManagedZonesListResponse struct { Header *ResponseHeader `json:"header,omitempty"` @@ -747,7 +895,9 @@ type Operation struct { StartTime string `json:"startTime,omitempty"` // Status: Status of the operation. Can be one of the following: - // "PENDING" or "DONE" (output only). + // "PENDING" or "DONE" (output only). A status of "DONE" means that the + // request to update the authoritative servers has been sent, but the + // servers might not be updated yet. // // Possible values: // "done" @@ -901,10 +1051,6 @@ func (s *Project) MarshalJSON() ([]byte, error) { // Quota: Limits associated with a Project. type Quota struct { - // BlackHoleHidesSystemZones: Whether a black hole zone should suppress - // system zones for this project. - BlackHoleHidesSystemZones bool `json:"blackHoleHidesSystemZones,omitempty"` - // DnsKeysPerManagedZone: Maximum allowed number of DnsKeys per // ManagedZone. DnsKeysPerManagedZone int64 `json:"dnsKeysPerManagedZone,omitempty"` @@ -916,6 +1062,14 @@ type Quota struct { // ManagedZones: Maximum allowed number of managed zones in the project. ManagedZones int64 `json:"managedZones,omitempty"` + // ManagedZonesPerNetwork: Maximum allowed number of managed zones which + // can be attached to a network. + ManagedZonesPerNetwork int64 `json:"managedZonesPerNetwork,omitempty"` + + // NetworksPerManagedZone: Maximum allowed number of networks to which a + // privately scoped zone can be attached. 
+ NetworksPerManagedZone int64 `json:"networksPerManagedZone,omitempty"` + // ResourceRecordsPerRrset: Maximum allowed number of ResourceRecords // per ResourceRecordSet. ResourceRecordsPerRrset int64 `json:"resourceRecordsPerRrset,omitempty"` @@ -941,21 +1095,21 @@ type Quota struct { WhitelistedKeySpecs []*DnsKeySpec `json:"whitelistedKeySpecs,omitempty"` // ForceSendFields is a list of field names (e.g. - // "BlackHoleHidesSystemZones") to unconditionally include in API - // requests. By default, fields with empty values are omitted from API - // requests. However, any non-pointer, non-interface field appearing in + // "DnsKeysPerManagedZone") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in // ForceSendFields will be sent to the server regardless of whether the // field is empty or not. This may be used to include empty fields in // Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. - // "BlackHoleHidesSystemZones") to include in API requests with the JSON - // null value. By default, fields with empty values are omitted from API - // requests. However, any field with an empty value appearing in - // NullFields will be sent to the server as null. It is an error if a - // field in this list has a non-empty value. This may be used to include - // null fields in Patch requests. + // NullFields is a list of field names (e.g. "DnsKeysPerManagedZone") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
NullFields []string `json:"-"` } @@ -976,7 +1130,7 @@ type ResourceRecordSet struct { Name string `json:"name,omitempty"` // Rrdatas: As defined in RFC 1035 (section 5) and RFC 1034 (section - // 3.6.1). + // 3.6.1) -- see examples. Rrdatas []string `json:"rrdatas,omitempty"` // SignatureRrdatas: As defined in RFC 4034 (section 3.2). @@ -986,8 +1140,8 @@ type ResourceRecordSet struct { // resolvers. Ttl int64 `json:"ttl,omitempty"` - // Type: The identifier of a supported record type, for example, A, - // AAAA, MX, TXT, and so on. + // Type: The identifier of a supported record type. See the list of + // Supported DNS record types. Type string `json:"type,omitempty"` // ForceSendFields is a list of field names (e.g. "Kind") to @@ -1160,9 +1314,13 @@ func (c *ChangesCreateCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}/changes") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, @@ -1327,9 +1485,13 @@ func (c *ChangesGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}/changes/{changeId}") urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, @@ -1523,9 +1685,13 @@ func (c *ChangesListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}/changes") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, @@ -1742,9 +1908,13 @@ func (c *DnsKeysGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}/dnsKeys/{dnsKeyId}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, @@ -1935,9 +2105,13 @@ func (c *DnsKeysListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}/dnsKeys") urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, @@ -2133,9 +2307,13 @@ func (c *ManagedZoneOperationsGetCall) doRequest(alt string) (*http.Response, er } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}/operations/{operation}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, @@ -2323,9 +2501,13 @@ func (c *ManagedZoneOperationsListCall) doRequest(alt string) (*http.Response, e } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}/operations") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, @@ -2520,9 +2702,13 @@ func (c *ManagedZonesCreateCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones") urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, @@ -2663,9 +2849,13 @@ func (c *ManagedZonesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, @@ -2797,9 +2987,13 @@ func (c *ManagedZonesGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, @@ -2974,9 +3168,13 @@ func (c *ManagedZonesListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones") urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, @@ -3155,9 +3353,13 @@ func (c *ManagedZonesPatchCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, @@ -3313,9 +3515,13 @@ func (c *ManagedZonesUpdateCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("PUT", urls, body) + if err != nil { + return nil, err + } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, @@ -3476,9 +3682,13 @@ func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}") urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, @@ -3655,9 +3865,13 @@ func (c *ResourceRecordSetsListCall) doRequest(alt string) (*http.Response, erro } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}/rrsets") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, diff --git a/vendor/google.golang.org/api/gensupport/backoff.go b/vendor/google.golang.org/api/gensupport/backoff.go index 135614047..94b7789ee 100644 --- a/vendor/google.golang.org/api/gensupport/backoff.go +++ b/vendor/google.golang.org/api/gensupport/backoff.go @@ -9,6 +9,8 @@ import ( "time" ) +// BackoffStrategy defines the set of functions that a backoff-er must +// implement. type BackoffStrategy interface { // Pause returns the duration of the next pause and true if the operation should be // retried, or false if no further retries should be attempted. @@ -28,6 +30,7 @@ type ExponentialBackoff struct { n uint } +// Pause returns the amount of time the caller should wait. func (eb *ExponentialBackoff) Pause() (time.Duration, bool) { if eb.total > eb.Max { return 0, false @@ -40,6 +43,8 @@ func (eb *ExponentialBackoff) Pause() (time.Duration, bool) { return d, true } +// Reset resets the backoff strategy such that the next Pause call will begin +// counting from the start. It is not safe to call concurrently with Pause. 
func (eb *ExponentialBackoff) Reset() { eb.n = 0 eb.total = 0 diff --git a/vendor/google.golang.org/api/gensupport/buffer.go b/vendor/google.golang.org/api/gensupport/buffer.go index 992104911..3d0817ede 100644 --- a/vendor/google.golang.org/api/gensupport/buffer.go +++ b/vendor/google.golang.org/api/gensupport/buffer.go @@ -11,7 +11,8 @@ import ( "google.golang.org/api/googleapi" ) -// MediaBuffer buffers data from an io.Reader to support uploading media in retryable chunks. +// MediaBuffer buffers data from an io.Reader to support uploading media in +// retryable chunks. It should be created with NewMediaBuffer. type MediaBuffer struct { media io.Reader @@ -22,6 +23,7 @@ type MediaBuffer struct { off int64 } +// NewMediaBuffer initializes a MediaBuffer. func NewMediaBuffer(media io.Reader, chunkSize int) *MediaBuffer { return &MediaBuffer{media: media, chunk: make([]byte, 0, chunkSize)} } diff --git a/vendor/google.golang.org/api/gensupport/go18.go b/vendor/google.golang.org/api/gensupport/go18.go deleted file mode 100644 index c76cb8f20..000000000 --- a/vendor/google.golang.org/api/gensupport/go18.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.8 - -package gensupport - -import ( - "io" - "net/http" -) - -// SetGetBody sets the GetBody field of req to f. -func SetGetBody(req *http.Request, f func() (io.ReadCloser, error)) { - req.GetBody = f -} diff --git a/vendor/google.golang.org/api/gensupport/jsonfloat.go b/vendor/google.golang.org/api/gensupport/jsonfloat.go index cb02335d2..837785081 100644 --- a/vendor/google.golang.org/api/gensupport/jsonfloat.go +++ b/vendor/google.golang.org/api/gensupport/jsonfloat.go @@ -1,4 +1,4 @@ -// Copyright 2016 Google Inc. All Rights Reserved. 
+// Copyright 2016 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/google.golang.org/api/gensupport/media.go b/vendor/google.golang.org/api/gensupport/media.go index 5895fef88..0ef96b3f1 100644 --- a/vendor/google.golang.org/api/gensupport/media.go +++ b/vendor/google.golang.org/api/gensupport/media.go @@ -9,6 +9,7 @@ import ( "fmt" "io" "io/ioutil" + "mime" "mime/multipart" "net/http" "net/textproto" @@ -115,11 +116,15 @@ type multipartReader struct { pipeOpen bool } -func newMultipartReader(parts []typeReader) *multipartReader { +// boundary optionally specifies the MIME boundary +func newMultipartReader(parts []typeReader, boundary string) *multipartReader { mp := &multipartReader{pipeOpen: true} var pw *io.PipeWriter mp.pr, pw = io.Pipe() mpw := multipart.NewWriter(pw) + if boundary != "" { + mpw.SetBoundary(boundary) + } mp.ctype = "multipart/related; boundary=" + mpw.Boundary() go func() { for _, part := range parts { @@ -163,10 +168,15 @@ func (mp *multipartReader) Close() error { // // The caller must call Close on the returned ReadCloser if reads are abandoned before reaching EOF. func CombineBodyMedia(body io.Reader, bodyContentType string, media io.Reader, mediaContentType string) (io.ReadCloser, string) { + return combineBodyMedia(body, bodyContentType, media, mediaContentType, "") +} + +// combineBodyMedia is CombineBodyMedia but with an optional mimeBoundary field. +func combineBodyMedia(body io.Reader, bodyContentType string, media io.Reader, mediaContentType, mimeBoundary string) (io.ReadCloser, string) { mp := newMultipartReader([]typeReader{ {body, bodyContentType}, {media, mediaContentType}, - }) + }, mimeBoundary) return mp, mp.ctype } @@ -242,6 +252,7 @@ func NewInfoFromResumableMedia(r io.ReaderAt, size int64, mediaType string) *Med } } +// SetProgressUpdater sets the progress updater for the media info. 
func (mi *MediaInfo) SetProgressUpdater(pu googleapi.ProgressUpdater) { if mi != nil { mi.progressUpdater = pu @@ -283,7 +294,11 @@ func (mi *MediaInfo) UploadRequest(reqHeaders http.Header, body io.Reader) (newB getBody = func() (io.ReadCloser, error) { rb := ioutil.NopCloser(fb()) rm := ioutil.NopCloser(fm()) - r, _ := CombineBodyMedia(rb, "application/json", rm, mi.mType) + var mimeBoundary string + if _, params, err := mime.ParseMediaType(ctype); err == nil { + mimeBoundary = params["boundary"] + } + r, _ := combineBodyMedia(rb, "application/json", rm, mi.mType, mimeBoundary) return r, nil } } @@ -334,3 +349,15 @@ func (mi *MediaInfo) ResumableUpload(locURI string) *ResumableUpload { }, } } + +// SetGetBody sets the GetBody field of req to f. This was once needed +// to gracefully support Go 1.7 and earlier which didn't have that +// field. +// +// Deprecated: the code generator no longer uses this as of +// 2019-02-19. Nothing else should be calling this anyway, but we +// won't delete this immediately; it will be deleted in as early as 6 +// months. +func SetGetBody(req *http.Request, f func() (io.ReadCloser, error)) { + req.GetBody = f +} diff --git a/vendor/google.golang.org/api/gensupport/not_go18.go b/vendor/google.golang.org/api/gensupport/not_go18.go deleted file mode 100644 index 2536501ce..000000000 --- a/vendor/google.golang.org/api/gensupport/not_go18.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build !go1.8 - -package gensupport - -import ( - "io" - "net/http" -) - -func SetGetBody(req *http.Request, f func() (io.ReadCloser, error)) {} diff --git a/vendor/google.golang.org/api/gensupport/params.go b/vendor/google.golang.org/api/gensupport/params.go index 3b3c74396..0e878a425 100644 --- a/vendor/google.golang.org/api/gensupport/params.go +++ b/vendor/google.golang.org/api/gensupport/params.go @@ -43,6 +43,7 @@ func (u URLParams) Encode() string { return url.Values(u).Encode() } +// SetOptions sets the URL params and any additional call options. func SetOptions(u URLParams, opts ...googleapi.CallOption) { for _, o := range opts { u.Set(o.Get()) diff --git a/vendor/google.golang.org/api/gensupport/resumable.go b/vendor/google.golang.org/api/gensupport/resumable.go index dcd591f7f..2552a6aca 100644 --- a/vendor/google.golang.org/api/gensupport/resumable.go +++ b/vendor/google.golang.org/api/gensupport/resumable.go @@ -5,14 +5,13 @@ package gensupport import ( + "context" "errors" "fmt" "io" "net/http" "sync" "time" - - "golang.org/x/net/context" ) const ( diff --git a/vendor/google.golang.org/api/gensupport/retry.go b/vendor/google.golang.org/api/gensupport/retry.go index c60b3c394..fdde3f42c 100644 --- a/vendor/google.golang.org/api/gensupport/retry.go +++ b/vendor/google.golang.org/api/gensupport/retry.go @@ -1,4 +1,4 @@ -// Copyright 2017 Google Inc. All Rights Reserved. +// Copyright 2017 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -15,12 +15,11 @@ package gensupport import ( + "context" "io" "net" "net/http" "time" - - "golang.org/x/net/context" ) // Retry invokes the given function, retrying it multiple times if the connection failed or diff --git a/vendor/google.golang.org/api/gensupport/send.go b/vendor/google.golang.org/api/gensupport/send.go index 0f75aa867..579939309 100644 --- a/vendor/google.golang.org/api/gensupport/send.go +++ b/vendor/google.golang.org/api/gensupport/send.go @@ -5,12 +5,10 @@ package gensupport import ( + "context" "encoding/json" "errors" "net/http" - - "golang.org/x/net/context" - "golang.org/x/net/context/ctxhttp" ) // Hook is the type of a function that is called once before each HTTP request @@ -32,7 +30,8 @@ func RegisterHook(h Hook) { // SendRequest sends a single HTTP request using the given client. // If ctx is non-nil, it calls all hooks, then sends the request with -// ctxhttp.Do, then calls any functions returned by the hooks in reverse order. +// req.WithContext, then calls any functions returned by the hooks in +// reverse order. func SendRequest(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { // Disallow Accept-Encoding because it interferes with the automatic gzip handling // done by the default http.Transport. See https://github.com/google/google-api-go-client/issues/219. @@ -50,7 +49,7 @@ func SendRequest(ctx context.Context, client *http.Client, req *http.Request) (* } // Send request. - resp, err := ctxhttp.Do(ctx, client, req) + resp, err := send(ctx, client, req) // Call returned funcs in reverse order. 
for i := len(post) - 1; i >= 0; i-- { @@ -61,6 +60,23 @@ func SendRequest(ctx context.Context, client *http.Client, req *http.Request) (* return resp, err } +func send(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + if client == nil { + client = http.DefaultClient + } + resp, err := client.Do(req.WithContext(ctx)) + // If we got an error, and the context has been canceled, + // the context's error is probably more useful. + if err != nil { + select { + case <-ctx.Done(): + err = ctx.Err() + default: + } + } + return resp, err +} + // DecodeResponse decodes the body of res into target. If there is no body, // target is unchanged. func DecodeResponse(target interface{}, res *http.Response) error { diff --git a/vendor/google.golang.org/api/googleapi/googleapi.go b/vendor/google.golang.org/api/googleapi/googleapi.go index f6e15be35..ab5376762 100644 --- a/vendor/google.golang.org/api/googleapi/googleapi.go +++ b/vendor/google.golang.org/api/googleapi/googleapi.go @@ -37,24 +37,28 @@ type SizeReaderAt interface { // ServerResponse is embedded in each Do response and // provides the HTTP status code and header sent by the server. type ServerResponse struct { - // HTTPStatusCode is the server's response status code. - // When using a resource method's Do call, this will always be in the 2xx range. + // HTTPStatusCode is the server's response status code. When using a + // resource method's Do call, this will always be in the 2xx range. HTTPStatusCode int // Header contains the response header fields from the server. Header http.Header } const ( + // Version defines the gax version being used. This is typically sent + // in an HTTP header to services. Version = "0.5" // UserAgent is the header string used to identify this package. UserAgent = "google-api-go-client/" + Version - // The default chunk size to use for resumable uploads if not specified by the user. 
+ // DefaultUploadChunkSize is the default chunk size to use for resumable + // uploads if not specified by the user. DefaultUploadChunkSize = 8 * 1024 * 1024 - // The minimum chunk size that can be used for resumable uploads. All - // user-specified chunk sizes must be multiple of this value. + // MinUploadChunkSize is the minimum chunk size that can be used for + // resumable uploads. All user-specified chunk sizes must be multiple of + // this value. MinUploadChunkSize = 256 * 1024 ) @@ -161,9 +165,13 @@ func CheckMediaResponse(res *http.Response) error { } } +// MarshalStyle defines whether to marshal JSON with a {"data": ...} wrapper. type MarshalStyle bool +// WithDataWrapper marshals JSON with a {"data": ...} wrapper. var WithDataWrapper = MarshalStyle(true) + +// WithoutDataWrapper marshals JSON without a {"data": ...} wrapper. var WithoutDataWrapper = MarshalStyle(false) func (wrap MarshalStyle) JSONReader(v interface{}) (io.Reader, error) { @@ -181,37 +189,12 @@ func (wrap MarshalStyle) JSONReader(v interface{}) (io.Reader, error) { return buf, nil } -// endingWithErrorReader from r until it returns an error. If the -// final error from r is io.EOF and e is non-nil, e is used instead. -type endingWithErrorReader struct { - r io.Reader - e error -} - -func (er endingWithErrorReader) Read(p []byte) (n int, err error) { - n, err = er.r.Read(p) - if err == io.EOF && er.e != nil { - err = er.e - } - return -} - -// countingWriter counts the number of bytes it receives to write, but -// discards them. -type countingWriter struct { - n *int64 -} - -func (w countingWriter) Write(p []byte) (int, error) { - *w.n += int64(len(p)) - return len(p), nil -} - // ProgressUpdater is a function that is called upon every progress update of a resumable upload. // This is the only part of a resumable upload (from googleapi) that is usable by the developer. // The remaining usable pieces of resumable uploads is exposed in each auto-generated API. 
type ProgressUpdater func(current, total int64) +// MediaOption defines the interface for setting media options. type MediaOption interface { setOptions(o *MediaOptions) } @@ -268,13 +251,27 @@ func ProcessMediaOptions(opts []MediaOption) *MediaOptions { return mo } +// ResolveRelative resolves relatives such as "http://www.golang.org/" and +// "topics/myproject/mytopic" into a single string, such as +// "http://www.golang.org/topics/myproject/mytopic". It strips all parent +// references (e.g. ../..) as well as anything after the host +// (e.g. /bar/gaz gets stripped out of foo.com/bar/gaz). func ResolveRelative(basestr, relstr string) string { u, _ := url.Parse(basestr) + afterColonPath := "" + if i := strings.IndexRune(relstr, ':'); i > 0 { + afterColonPath = relstr[i+1:] + relstr = relstr[:i] + } rel, _ := url.Parse(relstr) u = u.ResolveReference(rel) us := u.String() + if afterColonPath != "" { + us = fmt.Sprintf("%s:%s", us, afterColonPath) + } us = strings.Replace(us, "%7B", "{", -1) us = strings.Replace(us, "%7D", "}", -1) + us = strings.Replace(us, "%2A", "*", -1) return us } diff --git a/vendor/google.golang.org/api/googleapi/transport/apikey.go b/vendor/google.golang.org/api/googleapi/transport/apikey.go new file mode 100644 index 000000000..eca1ea250 --- /dev/null +++ b/vendor/google.golang.org/api/googleapi/transport/apikey.go @@ -0,0 +1,38 @@ +// Copyright 2012 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package transport contains HTTP transports used to make +// authenticated API requests. +package transport + +import ( + "errors" + "net/http" +) + +// APIKey is an HTTP Transport which wraps an underlying transport and +// appends an API Key "key" parameter to the URL of outgoing requests. +type APIKey struct { + // Key is the API Key to set on requests. + Key string + + // Transport is the underlying HTTP transport. 
+ // If nil, http.DefaultTransport is used. + Transport http.RoundTripper +} + +func (t *APIKey) RoundTrip(req *http.Request) (*http.Response, error) { + rt := t.Transport + if rt == nil { + rt = http.DefaultTransport + if rt == nil { + return nil, errors.New("googleapi/transport: no Transport specified or available") + } + } + newReq := *req + args := newReq.URL.Query() + args.Set("key", t.Key) + newReq.URL.RawQuery = args.Encode() + return rt.RoundTrip(&newReq) +} diff --git a/vendor/google.golang.org/api/googleapi/types.go b/vendor/google.golang.org/api/googleapi/types.go index c8fdd5416..a280e3021 100644 --- a/vendor/google.golang.org/api/googleapi/types.go +++ b/vendor/google.golang.org/api/googleapi/types.go @@ -120,33 +120,33 @@ func quotedList(n int, fn func(dst []byte, i int) []byte) ([]byte, error) { return dst, nil } -func (s Int64s) MarshalJSON() ([]byte, error) { - return quotedList(len(s), func(dst []byte, i int) []byte { - return strconv.AppendInt(dst, s[i], 10) +func (q Int64s) MarshalJSON() ([]byte, error) { + return quotedList(len(q), func(dst []byte, i int) []byte { + return strconv.AppendInt(dst, q[i], 10) }) } -func (s Int32s) MarshalJSON() ([]byte, error) { - return quotedList(len(s), func(dst []byte, i int) []byte { - return strconv.AppendInt(dst, int64(s[i]), 10) +func (q Int32s) MarshalJSON() ([]byte, error) { + return quotedList(len(q), func(dst []byte, i int) []byte { + return strconv.AppendInt(dst, int64(q[i]), 10) }) } -func (s Uint64s) MarshalJSON() ([]byte, error) { - return quotedList(len(s), func(dst []byte, i int) []byte { - return strconv.AppendUint(dst, s[i], 10) +func (q Uint64s) MarshalJSON() ([]byte, error) { + return quotedList(len(q), func(dst []byte, i int) []byte { + return strconv.AppendUint(dst, q[i], 10) }) } -func (s Uint32s) MarshalJSON() ([]byte, error) { - return quotedList(len(s), func(dst []byte, i int) []byte { - return strconv.AppendUint(dst, uint64(s[i]), 10) +func (q Uint32s) MarshalJSON() ([]byte, error) { + 
return quotedList(len(q), func(dst []byte, i int) []byte { + return strconv.AppendUint(dst, uint64(q[i]), 10) }) } -func (s Float64s) MarshalJSON() ([]byte, error) { - return quotedList(len(s), func(dst []byte, i int) []byte { - return strconv.AppendFloat(dst, s[i], 'g', -1, 64) +func (q Float64s) MarshalJSON() ([]byte, error) { + return quotedList(len(q), func(dst []byte, i int) []byte { + return strconv.AppendFloat(dst, q[i], 'g', -1, 64) }) } diff --git a/vendor/google.golang.org/api/internal/creds.go b/vendor/google.golang.org/api/internal/creds.go new file mode 100644 index 000000000..69b8659fd --- /dev/null +++ b/vendor/google.golang.org/api/internal/creds.go @@ -0,0 +1,102 @@ +// Copyright 2017 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + + "golang.org/x/oauth2" + + "golang.org/x/oauth2/google" +) + +// Creds returns credential information obtained from DialSettings, or if none, then +// it returns default credential information. 
+func Creds(ctx context.Context, ds *DialSettings) (*google.Credentials, error) { + if ds.Credentials != nil { + return ds.Credentials, nil + } + if ds.CredentialsJSON != nil { + return credentialsFromJSON(ctx, ds.CredentialsJSON, ds.Endpoint, ds.Scopes, ds.Audiences) + } + if ds.CredentialsFile != "" { + data, err := ioutil.ReadFile(ds.CredentialsFile) + if err != nil { + return nil, fmt.Errorf("cannot read credentials file: %v", err) + } + return credentialsFromJSON(ctx, data, ds.Endpoint, ds.Scopes, ds.Audiences) + } + if ds.TokenSource != nil { + return &google.Credentials{TokenSource: ds.TokenSource}, nil + } + cred, err := google.FindDefaultCredentials(ctx, ds.Scopes...) + if err != nil { + return nil, err + } + if len(cred.JSON) > 0 { + return credentialsFromJSON(ctx, cred.JSON, ds.Endpoint, ds.Scopes, ds.Audiences) + } + // For GAE and GCE, the JSON is empty so return the default credentials directly. + return cred, nil +} + +// JSON key file type. +const ( + serviceAccountKey = "service_account" +) + +// credentialsFromJSON returns a google.Credentials based on the input. +// +// - If the JSON is a service account and no scopes provided, returns self-signed JWT auth flow +// - Otherwise, returns OAuth 2.0 flow. +func credentialsFromJSON(ctx context.Context, data []byte, endpoint string, scopes []string, audiences []string) (*google.Credentials, error) { + cred, err := google.CredentialsFromJSON(ctx, data, scopes...) + if err != nil { + return nil, err + } + if len(data) > 0 && len(scopes) == 0 { + var f struct { + Type string `json:"type"` + // The rest JSON fields are omitted because they are not used. 
+ } + if err := json.Unmarshal(cred.JSON, &f); err != nil { + return nil, err + } + if f.Type == serviceAccountKey { + ts, err := selfSignedJWTTokenSource(data, endpoint, audiences) + if err != nil { + return nil, err + } + cred.TokenSource = ts + } + } + return cred, err +} + +func selfSignedJWTTokenSource(data []byte, endpoint string, audiences []string) (oauth2.TokenSource, error) { + // Use the API endpoint as the default audience + audience := endpoint + if len(audiences) > 0 { + // TODO(shinfan): Update golang oauth to support multiple audiences. + if len(audiences) > 1 { + return nil, fmt.Errorf("multiple audiences support is not implemented") + } + audience = audiences[0] + } + return google.JWTAccessTokenSourceFromJSON(data, audience) +} diff --git a/vendor/google.golang.org/api/internal/pool.go b/vendor/google.golang.org/api/internal/pool.go new file mode 100644 index 000000000..a4426dcb7 --- /dev/null +++ b/vendor/google.golang.org/api/internal/pool.go @@ -0,0 +1,61 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "errors" + + "google.golang.org/grpc/naming" +) + +// PoolResolver provides a fixed list of addresses to load balance between +// and does not provide further updates. 
+type PoolResolver struct { + poolSize int + dialOpt *DialSettings + ch chan []*naming.Update +} + +// NewPoolResolver returns a PoolResolver +// This is an EXPERIMENTAL API and may be changed or removed in the future. +func NewPoolResolver(size int, o *DialSettings) *PoolResolver { + return &PoolResolver{poolSize: size, dialOpt: o} +} + +// Resolve returns a Watcher for the endpoint defined by the DialSettings +// provided to NewPoolResolver. +func (r *PoolResolver) Resolve(target string) (naming.Watcher, error) { + if r.dialOpt.Endpoint == "" { + return nil, errors.New("no endpoint configured") + } + addrs := make([]*naming.Update, 0, r.poolSize) + for i := 0; i < r.poolSize; i++ { + addrs = append(addrs, &naming.Update{Op: naming.Add, Addr: r.dialOpt.Endpoint, Metadata: i}) + } + r.ch = make(chan []*naming.Update, 1) + r.ch <- addrs + return r, nil +} + +// Next returns a static list of updates on the first call, +// and blocks indefinitely until Close is called on subsequent calls. +func (r *PoolResolver) Next() ([]*naming.Update, error) { + return <-r.ch, nil +} + +// Close releases resources associated with the pool and causes Next to unblock. +func (r *PoolResolver) Close() { + close(r.ch) +} diff --git a/vendor/google.golang.org/api/internal/settings.go b/vendor/google.golang.org/api/internal/settings.go new file mode 100644 index 000000000..062301c65 --- /dev/null +++ b/vendor/google.golang.org/api/internal/settings.go @@ -0,0 +1,96 @@ +// Copyright 2017 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package internal supports the options and transport packages. +package internal + +import ( + "errors" + "net/http" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + "google.golang.org/grpc" +) + +// DialSettings holds information needed to establish a connection with a +// Google API service. +type DialSettings struct { + Endpoint string + Scopes []string + TokenSource oauth2.TokenSource + Credentials *google.Credentials + CredentialsFile string // if set, Token Source is ignored. + CredentialsJSON []byte + UserAgent string + APIKey string + Audiences []string + HTTPClient *http.Client + GRPCDialOpts []grpc.DialOption + GRPCConn *grpc.ClientConn + NoAuth bool + + // Google API system parameters. For more information please read: + // https://cloud.google.com/apis/docs/system-parameters + QuotaProject string + RequestReason string +} + +// Validate reports an error if ds is invalid. +func (ds *DialSettings) Validate() error { + hasCreds := ds.APIKey != "" || ds.TokenSource != nil || ds.CredentialsFile != "" || ds.Credentials != nil + if ds.NoAuth && hasCreds { + return errors.New("options.WithoutAuthentication is incompatible with any option that provides credentials") + } + // Credentials should not appear with other options. + // We currently allow TokenSource and CredentialsFile to coexist. + // TODO(jba): make TokenSource & CredentialsFile an error (breaking change). + nCreds := 0 + if ds.Credentials != nil { + nCreds++ + } + if ds.CredentialsJSON != nil { + nCreds++ + } + if ds.CredentialsFile != "" { + nCreds++ + } + if ds.APIKey != "" { + nCreds++ + } + if ds.TokenSource != nil { + nCreds++ + } + if len(ds.Scopes) > 0 && len(ds.Audiences) > 0 { + return errors.New("WithScopes is incompatible with WithAudience") + } + // Accept only one form of credentials, except we allow TokenSource and CredentialsFile for backwards compatibility. 
+ if nCreds > 1 && !(nCreds == 2 && ds.TokenSource != nil && ds.CredentialsFile != "") { + return errors.New("multiple credential options provided") + } + if ds.HTTPClient != nil && ds.GRPCConn != nil { + return errors.New("WithHTTPClient is incompatible with WithGRPCConn") + } + if ds.HTTPClient != nil && ds.GRPCDialOpts != nil { + return errors.New("WithHTTPClient is incompatible with gRPC dial options") + } + if ds.HTTPClient != nil && ds.QuotaProject != "" { + return errors.New("WithHTTPClient is incompatible with QuotaProject") + } + if ds.HTTPClient != nil && ds.RequestReason != "" { + return errors.New("WithHTTPClient is incompatible with RequestReason") + } + + return nil +} diff --git a/vendor/google.golang.org/api/option/credentials_go19.go b/vendor/google.golang.org/api/option/credentials_go19.go new file mode 100644 index 000000000..0636a8294 --- /dev/null +++ b/vendor/google.golang.org/api/option/credentials_go19.go @@ -0,0 +1,33 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.9 + +package option + +import ( + "golang.org/x/oauth2/google" + "google.golang.org/api/internal" +) + +type withCreds google.Credentials + +func (w *withCreds) Apply(o *internal.DialSettings) { + o.Credentials = (*google.Credentials)(w) +} + +// WithCredentials returns a ClientOption that authenticates API calls. 
+func WithCredentials(creds *google.Credentials) ClientOption { + return (*withCreds)(creds) +} diff --git a/vendor/google.golang.org/api/option/credentials_notgo19.go b/vendor/google.golang.org/api/option/credentials_notgo19.go new file mode 100644 index 000000000..74d3a4b5b --- /dev/null +++ b/vendor/google.golang.org/api/option/credentials_notgo19.go @@ -0,0 +1,32 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !go1.9 + +package option + +import ( + "golang.org/x/oauth2/google" + "google.golang.org/api/internal" +) + +type withCreds google.DefaultCredentials + +func (w *withCreds) Apply(o *internal.DialSettings) { + o.Credentials = (*google.DefaultCredentials)(w) +} + +func WithCredentials(creds *google.DefaultCredentials) ClientOption { + return (*withCreds)(creds) +} diff --git a/vendor/google.golang.org/api/option/option.go b/vendor/google.golang.org/api/option/option.go new file mode 100644 index 000000000..0a1c2dba9 --- /dev/null +++ b/vendor/google.golang.org/api/option/option.go @@ -0,0 +1,235 @@ +// Copyright 2017 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package option contains options for Google API clients. +package option + +import ( + "net/http" + + "golang.org/x/oauth2" + "google.golang.org/api/internal" + "google.golang.org/grpc" +) + +// A ClientOption is an option for a Google API client. +type ClientOption interface { + Apply(*internal.DialSettings) +} + +// WithTokenSource returns a ClientOption that specifies an OAuth2 token +// source to be used as the basis for authentication. +func WithTokenSource(s oauth2.TokenSource) ClientOption { + return withTokenSource{s} +} + +type withTokenSource struct{ ts oauth2.TokenSource } + +func (w withTokenSource) Apply(o *internal.DialSettings) { + o.TokenSource = w.ts +} + +type withCredFile string + +func (w withCredFile) Apply(o *internal.DialSettings) { + o.CredentialsFile = string(w) +} + +// WithCredentialsFile returns a ClientOption that authenticates +// API calls with the given service account or refresh token JSON +// credentials file. +func WithCredentialsFile(filename string) ClientOption { + return withCredFile(filename) +} + +// WithServiceAccountFile returns a ClientOption that uses a Google service +// account credentials file to authenticate. +// +// Deprecated: Use WithCredentialsFile instead. +func WithServiceAccountFile(filename string) ClientOption { + return WithCredentialsFile(filename) +} + +// WithCredentialsJSON returns a ClientOption that authenticates +// API calls with the given service account or refresh token JSON +// credentials. 
+func WithCredentialsJSON(p []byte) ClientOption { + return withCredentialsJSON(p) +} + +type withCredentialsJSON []byte + +func (w withCredentialsJSON) Apply(o *internal.DialSettings) { + o.CredentialsJSON = make([]byte, len(w)) + copy(o.CredentialsJSON, w) +} + +// WithEndpoint returns a ClientOption that overrides the default endpoint +// to be used for a service. +func WithEndpoint(url string) ClientOption { + return withEndpoint(url) +} + +type withEndpoint string + +func (w withEndpoint) Apply(o *internal.DialSettings) { + o.Endpoint = string(w) +} + +// WithScopes returns a ClientOption that overrides the default OAuth2 scopes +// to be used for a service. +func WithScopes(scope ...string) ClientOption { + return withScopes(scope) +} + +type withScopes []string + +func (w withScopes) Apply(o *internal.DialSettings) { + o.Scopes = make([]string, len(w)) + copy(o.Scopes, w) +} + +// WithUserAgent returns a ClientOption that sets the User-Agent. +func WithUserAgent(ua string) ClientOption { + return withUA(ua) +} + +type withUA string + +func (w withUA) Apply(o *internal.DialSettings) { o.UserAgent = string(w) } + +// WithHTTPClient returns a ClientOption that specifies the HTTP client to use +// as the basis of communications. This option may only be used with services +// that support HTTP as their communication transport. When used, the +// WithHTTPClient option takes precedent over all other supplied options. +func WithHTTPClient(client *http.Client) ClientOption { + return withHTTPClient{client} +} + +type withHTTPClient struct{ client *http.Client } + +func (w withHTTPClient) Apply(o *internal.DialSettings) { + o.HTTPClient = w.client +} + +// WithGRPCConn returns a ClientOption that specifies the gRPC client +// connection to use as the basis of communications. This option many only be +// used with services that support gRPC as their communication transport. When +// used, the WithGRPCConn option takes precedent over all other supplied +// options. 
+func WithGRPCConn(conn *grpc.ClientConn) ClientOption { + return withGRPCConn{conn} +} + +type withGRPCConn struct{ conn *grpc.ClientConn } + +func (w withGRPCConn) Apply(o *internal.DialSettings) { + o.GRPCConn = w.conn +} + +// WithGRPCDialOption returns a ClientOption that appends a new grpc.DialOption +// to an underlying gRPC dial. It does not work with WithGRPCConn. +func WithGRPCDialOption(opt grpc.DialOption) ClientOption { + return withGRPCDialOption{opt} +} + +type withGRPCDialOption struct{ opt grpc.DialOption } + +func (w withGRPCDialOption) Apply(o *internal.DialSettings) { + o.GRPCDialOpts = append(o.GRPCDialOpts, w.opt) +} + +// WithGRPCConnectionPool returns a ClientOption that creates a pool of gRPC +// connections that requests will be balanced between. +// This is an EXPERIMENTAL API and may be changed or removed in the future. +func WithGRPCConnectionPool(size int) ClientOption { + return withGRPCConnectionPool(size) +} + +type withGRPCConnectionPool int + +func (w withGRPCConnectionPool) Apply(o *internal.DialSettings) { + balancer := grpc.RoundRobin(internal.NewPoolResolver(int(w), o)) + o.GRPCDialOpts = append(o.GRPCDialOpts, grpc.WithBalancer(balancer)) +} + +// WithAPIKey returns a ClientOption that specifies an API key to be used +// as the basis for authentication. +// +// API Keys can only be used for JSON-over-HTTP APIs, including those under +// the import path google.golang.org/api/.... +func WithAPIKey(apiKey string) ClientOption { + return withAPIKey(apiKey) +} + +type withAPIKey string + +func (w withAPIKey) Apply(o *internal.DialSettings) { o.APIKey = string(w) } + +// WithAudiences returns a ClientOption that specifies an audience to be used +// as the audience field ("aud") for the JWT token authentication. 
+func WithAudiences(audience ...string) ClientOption { + return withAudiences(audience) +} + +type withAudiences []string + +func (w withAudiences) Apply(o *internal.DialSettings) { + o.Audiences = make([]string, len(w)) + copy(o.Audiences, w) +} + +// WithoutAuthentication returns a ClientOption that specifies that no +// authentication should be used. It is suitable only for testing and for +// accessing public resources, like public Google Cloud Storage buckets. +// It is an error to provide both WithoutAuthentication and any of WithAPIKey, +// WithTokenSource, WithCredentialsFile or WithServiceAccountFile. +func WithoutAuthentication() ClientOption { + return withoutAuthentication{} +} + +type withoutAuthentication struct{} + +func (w withoutAuthentication) Apply(o *internal.DialSettings) { o.NoAuth = true } + +// WithQuotaProject returns a ClientOption that specifies the project used +// for quota and billing purposes. +// +// For more information please read: +// https://cloud.google.com/apis/docs/system-parameters +func WithQuotaProject(quotaProject string) ClientOption { + return withQuotaProject(quotaProject) +} + +type withQuotaProject string + +func (w withQuotaProject) Apply(o *internal.DialSettings) { + o.QuotaProject = string(w) +} + +// WithRequestReason returns a ClientOption that specifies a reason for +// making the request, which is intended to be recorded in audit logging. +// An example reason would be a support-case ticket number. 
+// +// For more information please read: +// https://cloud.google.com/apis/docs/system-parameters +func WithRequestReason(requestReason string) ClientOption { + return withRequestReason(requestReason) +} + +type withRequestReason string + +func (w withRequestReason) Apply(o *internal.DialSettings) { + o.RequestReason = string(w) +} diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go new file mode 100644 index 000000000..c0d8bf20b --- /dev/null +++ b/vendor/google.golang.org/api/transport/http/dial.go @@ -0,0 +1,161 @@ +// Copyright 2015 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package http supports network connections to HTTP servers. +// This package is not intended for use by end developers. Use the +// google.golang.org/api/option package to configure API clients. +package http + +import ( + "context" + "errors" + "net/http" + + "go.opencensus.io/plugin/ochttp" + "golang.org/x/oauth2" + "google.golang.org/api/googleapi/transport" + "google.golang.org/api/internal" + "google.golang.org/api/option" + "google.golang.org/api/transport/http/internal/propagation" +) + +// NewClient returns an HTTP client for use communicating with a Google cloud +// service, configured with the given ClientOptions. It also returns the endpoint +// for the service as specified in the options. 
+func NewClient(ctx context.Context, opts ...option.ClientOption) (*http.Client, string, error) { + settings, err := newSettings(opts) + if err != nil { + return nil, "", err + } + // TODO(cbro): consider injecting the User-Agent even if an explicit HTTP client is provided? + if settings.HTTPClient != nil { + return settings.HTTPClient, settings.Endpoint, nil + } + trans, err := newTransport(ctx, defaultBaseTransport(ctx), settings) + if err != nil { + return nil, "", err + } + return &http.Client{Transport: trans}, settings.Endpoint, nil +} + +// NewTransport creates an http.RoundTripper for use communicating with a Google +// cloud service, configured with the given ClientOptions. Its RoundTrip method delegates to base. +func NewTransport(ctx context.Context, base http.RoundTripper, opts ...option.ClientOption) (http.RoundTripper, error) { + settings, err := newSettings(opts) + if err != nil { + return nil, err + } + if settings.HTTPClient != nil { + return nil, errors.New("transport/http: WithHTTPClient passed to NewTransport") + } + return newTransport(ctx, base, settings) +} + +func newTransport(ctx context.Context, base http.RoundTripper, settings *internal.DialSettings) (http.RoundTripper, error) { + trans := base + trans = parameterTransport{ + base: trans, + userAgent: settings.UserAgent, + quotaProject: settings.QuotaProject, + requestReason: settings.RequestReason, + } + trans = addOCTransport(trans) + switch { + case settings.NoAuth: + // Do nothing. 
+ case settings.APIKey != "": + trans = &transport.APIKey{ + Transport: trans, + Key: settings.APIKey, + } + default: + creds, err := internal.Creds(ctx, settings) + if err != nil { + return nil, err + } + trans = &oauth2.Transport{ + Base: trans, + Source: creds.TokenSource, + } + } + return trans, nil +} + +func newSettings(opts []option.ClientOption) (*internal.DialSettings, error) { + var o internal.DialSettings + for _, opt := range opts { + opt.Apply(&o) + } + if err := o.Validate(); err != nil { + return nil, err + } + if o.GRPCConn != nil { + return nil, errors.New("unsupported gRPC connection specified") + } + return &o, nil +} + +type parameterTransport struct { + userAgent string + quotaProject string + requestReason string + + base http.RoundTripper +} + +func (t parameterTransport) RoundTrip(req *http.Request) (*http.Response, error) { + rt := t.base + if rt == nil { + return nil, errors.New("transport: no Transport specified") + } + if t.userAgent == "" { + return rt.RoundTrip(req) + } + newReq := *req + newReq.Header = make(http.Header) + for k, vv := range req.Header { + newReq.Header[k] = vv + } + // TODO(cbro): append to existing User-Agent header? + newReq.Header.Set("User-Agent", t.userAgent) + + // Attach system parameters into the header + if t.quotaProject != "" { + newReq.Header.Set("X-Goog-User-Project", t.quotaProject) + } + if t.requestReason != "" { + newReq.Header.Set("X-Goog-Request-Reason", t.requestReason) + } + + return rt.RoundTrip(&newReq) +} + +// Set at init time by dial_appengine.go. If nil, we're not on App Engine. +var appengineUrlfetchHook func(context.Context) http.RoundTripper + +// defaultBaseTransport returns the base HTTP transport. +// On App Engine, this is urlfetch.Transport, otherwise it's http.DefaultTransport. 
+func defaultBaseTransport(ctx context.Context) http.RoundTripper { + if appengineUrlfetchHook != nil { + return appengineUrlfetchHook(ctx) + } + return http.DefaultTransport +} + +func addOCTransport(trans http.RoundTripper) http.RoundTripper { + return &ochttp.Transport{ + Base: trans, + Propagation: &propagation.HTTPFormat{}, + } +} diff --git a/vendor/google.golang.org/api/transport/http/dial_appengine.go b/vendor/google.golang.org/api/transport/http/dial_appengine.go new file mode 100644 index 000000000..04c81413c --- /dev/null +++ b/vendor/google.golang.org/api/transport/http/dial_appengine.go @@ -0,0 +1,30 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build appengine + +package http + +import ( + "context" + "net/http" + + "google.golang.org/appengine/urlfetch" +) + +func init() { + appengineUrlfetchHook = func(ctx context.Context) http.RoundTripper { + return &urlfetch.Transport{Context: ctx} + } +} diff --git a/vendor/google.golang.org/api/transport/http/internal/propagation/http.go b/vendor/google.golang.org/api/transport/http/internal/propagation/http.go new file mode 100644 index 000000000..24b4f0d29 --- /dev/null +++ b/vendor/google.golang.org/api/transport/http/internal/propagation/http.go @@ -0,0 +1,96 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.8 + +// Package propagation implements X-Cloud-Trace-Context header propagation used +// by Google Cloud products. +package propagation + +import ( + "encoding/binary" + "encoding/hex" + "fmt" + "net/http" + "strconv" + "strings" + + "go.opencensus.io/trace" + "go.opencensus.io/trace/propagation" +) + +const ( + httpHeaderMaxSize = 200 + httpHeader = `X-Cloud-Trace-Context` +) + +var _ propagation.HTTPFormat = (*HTTPFormat)(nil) + +// HTTPFormat implements propagation.HTTPFormat to propagate +// traces in HTTP headers for Google Cloud Platform and Stackdriver Trace. +type HTTPFormat struct{} + +// SpanContextFromRequest extracts a Stackdriver Trace span context from incoming requests. +func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) { + h := req.Header.Get(httpHeader) + // See https://cloud.google.com/trace/docs/faq for the header HTTPFormat. + // Return if the header is empty or missing, or if the header is unreasonably + // large, to avoid making unnecessary copies of a large string. + if h == "" || len(h) > httpHeaderMaxSize { + return trace.SpanContext{}, false + } + + // Parse the trace id field. + slash := strings.Index(h, `/`) + if slash == -1 { + return trace.SpanContext{}, false + } + tid, h := h[:slash], h[slash+1:] + + buf, err := hex.DecodeString(tid) + if err != nil { + return trace.SpanContext{}, false + } + copy(sc.TraceID[:], buf) + + // Parse the span id field. 
+ spanstr := h + semicolon := strings.Index(h, `;`) + if semicolon != -1 { + spanstr, h = h[:semicolon], h[semicolon+1:] + } + sid, err := strconv.ParseUint(spanstr, 10, 64) + if err != nil { + return trace.SpanContext{}, false + } + binary.BigEndian.PutUint64(sc.SpanID[:], sid) + + // Parse the options field, options field is optional. + if !strings.HasPrefix(h, "o=") { + return sc, true + } + o, err := strconv.ParseUint(h[2:], 10, 64) + if err != nil { + return trace.SpanContext{}, false + } + sc.TraceOptions = trace.TraceOptions(o) + return sc, true +} + +// SpanContextToRequest modifies the given request to include a Stackdriver Trace header. +func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) { + sid := binary.BigEndian.Uint64(sc.SpanID[:]) + header := fmt.Sprintf("%s/%d;o=%d", hex.EncodeToString(sc.TraceID[:]), sid, int64(sc.TraceOptions)) + req.Header.Set(httpHeader, header) +}