fix: update lego.

Fernandez Ludovic 2019-04-26 11:08:44 +02:00 committed by Traefiker Bot
parent b8b0c8f3e5
commit 8d848c3d60
169 changed files with 12224 additions and 605 deletions

Gopkg.lock (generated)

@ -598,7 +598,7 @@
revision = "73d445a93680fa1a78ae23a5839bad48f32ba1ee"
[[projects]]
digest = "1:a04af13190b67ff69cf8fcd79ee133a24c4a7a900cacbc296261dd43f3fbde5c"
digest = "1:68a7713d996a30a8394715220e779bbcbc880a18b1e0ab0e12fd1fbbf9c711e6"
name = "github.com/go-acme/lego"
packages = [
"acme",
@ -683,8 +683,8 @@
"registration",
]
pruneopts = "NUT"
revision = "aaecc1ca7254190b71c5f01f57ee3bb6701bc937"
version = "v2.4.0"
revision = "3d13faf68920543a393ad6cdfdea429627af2d34"
version = "v2.5.0"
[[projects]]
branch = "fork-containous"
@ -1416,17 +1416,18 @@
revision = "256dc444b735e061061cf46c809487313d5b0065"
[[projects]]
branch = "master"
digest = "1:fff470b0a7bbf05cfe8bfc73bfdf4d21eb009ea84e601f3d27781474e5da960f"
digest = "1:253f275bd72c42f8d234712d1574c8b222fe9b72838bfaca11b21ace9c0e3d0a"
name = "github.com/sacloud/libsacloud"
packages = [
".",
"api",
"sacloud",
"sacloud/ostype",
"utils/mutexkv",
]
pruneopts = "NUT"
revision = "306ea89b6ef19334614f7b0fc5aa19595022bb8c"
revision = "41c392dee98a83260abbe0fcd5c13beb7c75d103"
version = "v1.21.1"
[[projects]]
digest = "1:6bc0652ea6e39e22ccd522458b8bdd8665bf23bdc5a20eec90056e4dc7e273ca"
@ -1626,6 +1627,42 @@
pruneopts = "NUT"
revision = "0c8571ac0ce161a5feb57375a9cdf148c98c0f70"
[[projects]]
digest = "1:aafe0319af5410fb19a23a575ea6ee4b14253e122ef87f936bac65ea1e6b280c"
name = "go.opencensus.io"
packages = [
".",
"internal",
"internal/tagencoding",
"metric/metricdata",
"metric/metricproducer",
"plugin/ochttp",
"plugin/ochttp/propagation/b3",
"resource",
"stats",
"stats/internal",
"stats/view",
"tag",
"trace",
"trace/internal",
"trace/propagation",
"trace/tracestate",
]
pruneopts = "NUT"
revision = "df6e2001952312404b06f5f6f03fcb4aec1648e5"
version = "v0.21.0"
[[projects]]
branch = "master"
digest = "1:02fe59517e10f9b400b500af8ac228c74cecb0cba7a5f438d8283edb97e14270"
name = "go.uber.org/ratelimit"
packages = [
".",
"internal/clock",
]
pruneopts = "NUT"
revision = "c15da02342779cb6dc027fc95ee2277787698f36"
[[projects]]
branch = "master"
digest = "1:30c1930f8c9fee79f3af60c8b7cd92edd12a4f22187f5527d53509b1a794f555"
@ -1744,16 +1781,21 @@
[[projects]]
branch = "master"
digest = "1:da32ebe70dd3ec97d2df26281b08b18d05c2f12491ae79f389813f6c8d3006b3"
digest = "1:70c173b8ecc111dd01dc07f0ada72c076e4ed91618ee559312ef8adf154cc539"
name = "google.golang.org/api"
packages = [
"dns/v1",
"gensupport",
"googleapi",
"googleapi/internal/uritemplates",
"googleapi/transport",
"internal",
"option",
"transport/http",
"transport/http/internal/propagation",
]
pruneopts = "NUT"
revision = "de943baf05a022a8f921b544b7827bacaba1aed5"
revision = "067bed655e9cbc26f4dbac8f8897b30756d90990"
[[projects]]
digest = "1:7206d98ec77c90c72ec2c405181a1dcf86965803b6dbc4f98ceab7a5047c37a9"


@ -193,9 +193,8 @@ required = [
name = "github.com/vulcand/oxy"
[[constraint]]
# branch = "master"
name = "github.com/go-acme/lego"
version = "2.4.0"
version = "2.5.0"
[[constraint]]
name = "google.golang.org/grpc"


@ -6,7 +6,6 @@ import (
"crypto/elliptic"
"crypto/rsa"
"encoding/base64"
"errors"
"fmt"
"github.com/go-acme/lego/acme/api/internal/nonces"
@ -118,9 +117,6 @@ func (j *JWS) GetKeyAuthorization(token string) (string, error) {
// Generate the Key Authorization for the challenge
jwk := &jose.JSONWebKey{Key: publicKey}
if jwk == nil {
return "", errors.New("could not generate JWK from key")
}
thumbBytes, err := jwk.Thumbprint(crypto.SHA256)
if err != nil {


@ -5,7 +5,7 @@ package sender
const (
// ourUserAgent is the User-Agent of this underlying library package.
ourUserAgent = "xenolf-acme/2.4.0"
ourUserAgent = "xenolf-acme/2.5.0"
// ourUserAgentComment is part of the UA comment linked to the version status of this underlying library package.
// values: detach|release


@ -114,6 +114,7 @@ func (c *Certifier) Obtain(request ObtainRequest) (*Resource, error) {
err = c.resolver.Solve(authz)
if err != nil {
// If any challenge fails, return. Do not generate partial SAN certificates.
c.deactivateAuthorizations(order)
return nil, err
}
@ -170,6 +171,7 @@ func (c *Certifier) ObtainForCSR(csr x509.CertificateRequest, bundle bool) (*Res
err = c.resolver.Solve(authz)
if err != nil {
// If any challenge fails, return. Do not generate partial SAN certificates.
c.deactivateAuthorizations(order)
return nil, err
}


@ -27,7 +27,7 @@ func NewDefaultConfig() *Config {
return &Config{
PropagationTimeout: env.GetOrDefaultSecond("CLOUDNS_PROPAGATION_TIMEOUT", 120*time.Second),
PollingInterval: env.GetOrDefaultSecond("CLOUDNS_POLLING_INTERVAL", 4*time.Second),
TTL: env.GetOrDefaultInt("CLOUDNS_TTL", dns01.DefaultTTL),
TTL: env.GetOrDefaultInt("CLOUDNS_TTL", 60),
HTTPClient: &http.Client{
Timeout: env.GetOrDefaultSecond("CLOUDNS_HTTP_TIMEOUT", 30*time.Second),
},
@ -64,7 +64,7 @@ func NewDNSProviderConfig(config *Config) (*DNSProvider, error) {
client, err := internal.NewClient(config.AuthID, config.AuthPassword)
if err != nil {
return nil, err
return nil, fmt.Errorf("ClouDNS: %v", err)
}
client.HTTPClient = config.HTTPClient
@ -78,10 +78,15 @@ func (d *DNSProvider) Present(domain, token, keyAuth string) error {
zone, err := d.client.GetZone(fqdn)
if err != nil {
return err
return fmt.Errorf("ClouDNS: %v", err)
}
return d.client.AddTxtRecord(zone.Name, fqdn, value, d.config.TTL)
err = d.client.AddTxtRecord(zone.Name, fqdn, value, d.config.TTL)
if err != nil {
return fmt.Errorf("ClouDNS: %v", err)
}
return nil
}
// CleanUp removes the TXT record matching the specified parameters.
@ -90,15 +95,23 @@ func (d *DNSProvider) CleanUp(domain, token, keyAuth string) error {
zone, err := d.client.GetZone(fqdn)
if err != nil {
return err
return fmt.Errorf("ClouDNS: %v", err)
}
record, err := d.client.FindTxtRecord(zone.Name, fqdn)
if err != nil {
return err
return fmt.Errorf("ClouDNS: %v", err)
}
return d.client.RemoveTxtRecord(record.ID, zone.Name)
if record == nil {
return nil
}
err = d.client.RemoveTxtRecord(record.ID, zone.Name)
if err != nil {
return fmt.Errorf("ClouDNS: %v", err)
}
return nil
}
// Timeout returns the timeout and interval to use when checking for DNS propagation.


@ -2,6 +2,7 @@ package internal
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
@ -14,6 +15,11 @@ import (
const defaultBaseURL = "https://api.cloudns.net/dns/"
type apiResponse struct {
Status string `json:"status"`
StatusDescription string `json:"statusDescription"`
}
type Zone struct {
Name string
Type string
@ -37,11 +43,11 @@ type TXTRecords map[string]TXTRecord
// NewClient creates a ClouDNS client
func NewClient(authID string, authPassword string) (*Client, error) {
if authID == "" {
return nil, fmt.Errorf("ClouDNS: credentials missing: authID")
return nil, fmt.Errorf("credentials missing: authID")
}
if authPassword == "" {
return nil, fmt.Errorf("ClouDNS: credentials missing: authPassword")
return nil, fmt.Errorf("credentials missing: authPassword")
}
baseURL, err := url.Parse(defaultBaseURL)
@ -90,7 +96,7 @@ func (c *Client) GetZone(authFQDN string) (*Zone, error) {
if len(result) > 0 {
if err = json.Unmarshal(result, &zone); err != nil {
return nil, fmt.Errorf("ClouDNS: zone unmarshaling error: %v", err)
return nil, fmt.Errorf("zone unmarshaling error: %v", err)
}
}
@ -98,7 +104,7 @@ func (c *Client) GetZone(authFQDN string) (*Zone, error) {
return &zone, nil
}
return nil, fmt.Errorf("ClouDNS: zone %s not found for authFQDN %s", authZoneName, authFQDN)
return nil, fmt.Errorf("zone %s not found for authFQDN %s", authZoneName, authFQDN)
}
// FindTxtRecord returns the TXT record for a zone name and a FQDN
@ -119,9 +125,14 @@ func (c *Client) FindTxtRecord(zoneName, fqdn string) (*TXTRecord, error) {
return nil, err
}
// the API returns [] when there is no records.
if string(result) == "[]" {
return nil, nil
}
var records TXTRecords
if err = json.Unmarshal(result, &records); err != nil {
return nil, fmt.Errorf("ClouDNS: TXT record unmarshaling error: %v", err)
return nil, fmt.Errorf("TXT record unmarshaling error: %v: %s", err, string(result))
}
for _, record := range records {
@ -130,7 +141,7 @@ func (c *Client) FindTxtRecord(zoneName, fqdn string) (*TXTRecord, error) {
}
}
return nil, fmt.Errorf("ClouDNS: no existing record found for %q", fqdn)
return nil, nil
}
// AddTxtRecord add a TXT record
@ -144,12 +155,25 @@ func (c *Client) AddTxtRecord(zoneName string, fqdn, value string, ttl int) erro
q.Add("domain-name", zoneName)
q.Add("host", host)
q.Add("record", value)
q.Add("ttl", strconv.Itoa(ttl))
q.Add("ttl", strconv.Itoa(ttlRounder(ttl)))
q.Add("record-type", "TXT")
reqURL.RawQuery = q.Encode()
_, err := c.doRequest(http.MethodPost, &reqURL)
return err
raw, err := c.doRequest(http.MethodPost, &reqURL)
if err != nil {
return err
}
resp := apiResponse{}
if err = json.Unmarshal(raw, &resp); err != nil {
return fmt.Errorf("apiResponse unmarshaling error: %v: %s", err, string(raw))
}
if resp.Status != "Success" {
return fmt.Errorf("fail to add TXT record: %s %s", resp.Status, resp.StatusDescription)
}
return nil
}
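Aside (not part of the patch): a minimal standalone sketch of the status check that AddTxtRecord and RemoveTxtRecord now perform on the ClouDNS response. The JSON payloads below are illustrative assumptions, not captured ClouDNS responses.

package main

import (
	"encoding/json"
	"fmt"
)

// Same shape as the apiResponse type added in this file.
type apiResponse struct {
	Status            string `json:"status"`
	StatusDescription string `json:"statusDescription"`
}

func checkStatus(raw []byte) error {
	resp := apiResponse{}
	if err := json.Unmarshal(raw, &resp); err != nil {
		return fmt.Errorf("apiResponse unmarshaling error: %v: %s", err, string(raw))
	}
	if resp.Status != "Success" {
		return fmt.Errorf("request rejected: %s %s", resp.Status, resp.StatusDescription)
	}
	return nil
}

func main() {
	// Hypothetical payloads, used only to exercise the check above.
	fmt.Println(checkStatus([]byte(`{"status":"Success","statusDescription":"The record was added."}`)))
	fmt.Println(checkStatus([]byte(`{"status":"Failed","statusDescription":"Missing parameter record."}`)))
}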
// RemoveTxtRecord remove a TXT record
@ -162,8 +186,21 @@ func (c *Client) RemoveTxtRecord(recordID int, zoneName string) error {
q.Add("record-id", strconv.Itoa(recordID))
reqURL.RawQuery = q.Encode()
_, err := c.doRequest(http.MethodPost, &reqURL)
return err
raw, err := c.doRequest(http.MethodPost, &reqURL)
if err != nil {
return err
}
resp := apiResponse{}
if err = json.Unmarshal(raw, &resp); err != nil {
return fmt.Errorf("apiResponse unmarshaling error: %v: %s", err, string(raw))
}
if resp.Status != "Success" {
return fmt.Errorf("fail to add TXT record: %s %s", resp.Status, resp.StatusDescription)
}
return nil
}
func (c *Client) doRequest(method string, url *url.URL) (json.RawMessage, error) {
@ -174,18 +211,18 @@ func (c *Client) doRequest(method string, url *url.URL) (json.RawMessage, error)
resp, err := c.HTTPClient.Do(req)
if err != nil {
return nil, fmt.Errorf("ClouDNS: %v", err)
return nil, err
}
defer resp.Body.Close()
content, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("ClouDNS: %s", toUnreadableBodyMessage(req, content))
return nil, errors.New(toUnreadableBodyMessage(req, content))
}
if resp.StatusCode != 200 {
return nil, fmt.Errorf("ClouDNS: invalid code (%v), error: %s", resp.StatusCode, content)
return nil, fmt.Errorf("invalid code (%v), error: %s", resp.StatusCode, content)
}
return content, nil
}
@ -198,7 +235,7 @@ func (c *Client) buildRequest(method string, url *url.URL) (*http.Request, error
req, err := http.NewRequest(method, url.String(), nil)
if err != nil {
return nil, fmt.Errorf("ClouDNS: invalid request: %v", err)
return nil, fmt.Errorf("invalid request: %v", err)
}
return req, nil
@ -207,3 +244,28 @@ func (c *Client) buildRequest(method string, url *url.URL) (*http.Request, error
func toUnreadableBodyMessage(req *http.Request, rawBody []byte) string {
return fmt.Sprintf("the request %s sent a response with a body which is an invalid format: %q", req.URL, string(rawBody))
}
// https://www.cloudns.net/wiki/article/58/
// Available TTL's:
// 60 = 1 minute
// 300 = 5 minutes
// 900 = 15 minutes
// 1800 = 30 minutes
// 3600 = 1 hour
// 21600 = 6 hours
// 43200 = 12 hours
// 86400 = 1 day
// 172800 = 2 days
// 259200 = 3 days
// 604800 = 1 week
// 1209600 = 2 weeks
// 2592000 = 1 month
func ttlRounder(ttl int) int {
for _, validTTL := range []int{60, 300, 900, 1800, 3600, 21600, 43200, 86400, 172800, 259200, 604800, 1209600} {
if ttl <= validTTL {
return validTTL
}
}
return 2592000
}
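Aside (not part of the patch): ttlRounder, defined just above, rounds an arbitrary TTL up to the nearest value ClouDNS accepts, falling back to one month for anything larger. A runnable sketch of the mapping:

package main

import "fmt"

// Copy of ttlRounder from above, so the example runs on its own.
func ttlRounder(ttl int) int {
	for _, validTTL := range []int{60, 300, 900, 1800, 3600, 21600, 43200, 86400, 172800, 259200, 604800, 1209600} {
		if ttl <= validTTL {
			return validTTL
		}
	}
	return 2592000
}

func main() {
	for _, ttl := range []int{1, 60, 61, 299, 3000, 9999999} {
		fmt.Printf("%d -> %d\n", ttl, ttlRounder(ttl))
	}
	// Prints: 1 -> 60, 60 -> 60, 61 -> 300, 299 -> 300, 3000 -> 3600, 9999999 -> 2592000
}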


@ -47,13 +47,13 @@ func (d *DNSProvider) addTXTRecord(domain string, name string, value string, ttl
return err
}
message := &apiResponse{}
err = d.do(req, message)
message := apiResponse{}
err = d.do(req, &message)
if err != nil {
return fmt.Errorf("unable to create TXT record for domain %s and name %s: %v", domain, name, err)
}
if message != nil && len(message.Message) > 0 {
if len(message.Message) > 0 {
log.Infof("API response: %s", message.Message)
}
@ -87,13 +87,13 @@ func (d *DNSProvider) deleteTXTRecord(domain string, name string) error {
return err
}
message := &apiResponse{}
err = d.do(req, message)
message := apiResponse{}
err = d.do(req, &message)
if err != nil {
return fmt.Errorf("unable to delete TXT record for domain %s and name %s: %v", domain, name, err)
}
if message != nil && len(message.Message) > 0 {
if len(message.Message) > 0 {
log.Infof("API response: %s", message.Message)
}


@ -18,6 +18,7 @@ import (
"golang.org/x/oauth2/google"
"google.golang.org/api/dns/v1"
"google.golang.org/api/googleapi"
"google.golang.org/api/option"
)
const (
@ -139,8 +140,11 @@ func NewDNSProviderConfig(config *Config) (*DNSProvider, error) {
if config == nil {
return nil, errors.New("googlecloud: the configuration of the DNS provider is nil")
}
if config.HTTPClient == nil {
return nil, fmt.Errorf("googlecloud: unable to create Google Cloud DNS service: client is nil")
}
svc, err := dns.New(config.HTTPClient)
svc, err := dns.NewService(context.Background(), option.WithHTTPClient(config.HTTPClient))
if err != nil {
return nil, fmt.Errorf("googlecloud: unable to create Google Cloud DNS service: %v", err)
}
@ -306,7 +310,13 @@ func (d *DNSProvider) getHostedZone(domain string) (string, error) {
return "", fmt.Errorf("no matching domain found for domain %s", authZone)
}
return zones.ManagedZones[0].Name, nil
for _, z := range zones.ManagedZones {
if z.Visibility == "public" {
return z.Name, nil
}
}
return "", fmt.Errorf("no public zone found for domain %s", authZone)
}
func (d *DNSProvider) findTxtRecords(zone, fqdn string) ([]*dns.ResourceRecordSet, error) {


@ -0,0 +1,106 @@
package sakuracloud
import (
"fmt"
"net/http"
"strings"
"github.com/go-acme/lego/challenge/dns01"
"github.com/sacloud/libsacloud/api"
"github.com/sacloud/libsacloud/sacloud"
)
const sacloudAPILockKey = "lego/dns/sacloud"
func (d *DNSProvider) addTXTRecord(fqdn, domain, value string, ttl int) error {
sacloud.LockByKey(sacloudAPILockKey)
defer sacloud.UnlockByKey(sacloudAPILockKey)
zone, err := d.getHostedZone(domain)
if err != nil {
return fmt.Errorf("sakuracloud: %v", err)
}
name := d.extractRecordName(fqdn, zone.Name)
zone.AddRecord(zone.CreateNewRecord(name, "TXT", value, ttl))
_, err = d.client.Update(zone.ID, zone)
if err != nil {
return fmt.Errorf("sakuracloud: API call failed: %v", err)
}
return nil
}
func (d *DNSProvider) cleanupTXTRecord(fqdn, domain string) error {
sacloud.LockByKey(sacloudAPILockKey)
defer sacloud.UnlockByKey(sacloudAPILockKey)
zone, err := d.getHostedZone(domain)
if err != nil {
return fmt.Errorf("sakuracloud: %v", err)
}
records := d.findTxtRecords(fqdn, zone)
for _, record := range records {
var updRecords []sacloud.DNSRecordSet
for _, r := range zone.Settings.DNS.ResourceRecordSets {
if !(r.Name == record.Name && r.Type == record.Type && r.RData == record.RData) {
updRecords = append(updRecords, r)
}
}
zone.Settings.DNS.ResourceRecordSets = updRecords
}
_, err = d.client.Update(zone.ID, zone)
if err != nil {
return fmt.Errorf("sakuracloud: API call failed: %v", err)
}
return nil
}
func (d *DNSProvider) getHostedZone(domain string) (*sacloud.DNS, error) {
authZone, err := dns01.FindZoneByFqdn(dns01.ToFqdn(domain))
if err != nil {
return nil, err
}
zoneName := dns01.UnFqdn(authZone)
res, err := d.client.Reset().WithNameLike(zoneName).Find()
if err != nil {
if notFound, ok := err.(api.Error); ok && notFound.ResponseCode() == http.StatusNotFound {
return nil, fmt.Errorf("zone %s not found on SakuraCloud DNS: %v", zoneName, err)
}
return nil, fmt.Errorf("API call failed: %v", err)
}
for _, zone := range res.CommonServiceDNSItems {
if zone.Name == zoneName {
return &zone, nil
}
}
return nil, fmt.Errorf("zone %s not found", zoneName)
}
func (d *DNSProvider) findTxtRecords(fqdn string, zone *sacloud.DNS) []sacloud.DNSRecordSet {
recordName := d.extractRecordName(fqdn, zone.Name)
var res []sacloud.DNSRecordSet
for _, record := range zone.Settings.DNS.ResourceRecordSets {
if record.Name == recordName && record.Type == "TXT" {
res = append(res, record)
}
}
return res
}
func (d *DNSProvider) extractRecordName(fqdn, domain string) string {
name := dns01.UnFqdn(fqdn)
if idx := strings.Index(name, "."+domain); idx != -1 {
return name[:idx]
}
return name
}
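Aside (not part of the patch): a standalone sketch of how extractRecordName derives the record label from the challenge FQDN and the zone name. unFqdn stands in for dns01.UnFqdn, and the domain names are illustrative.

package main

import (
	"fmt"
	"strings"
)

// unFqdn mirrors dns01.UnFqdn: it strips a trailing dot, if any.
func unFqdn(name string) string {
	return strings.TrimSuffix(name, ".")
}

// extractRecordName behaves like the helper above: it returns the record
// label relative to the zone name.
func extractRecordName(fqdn, domain string) string {
	name := unFqdn(fqdn)
	if idx := strings.Index(name, "."+domain); idx != -1 {
		return name[:idx]
	}
	return name
}

func main() {
	fmt.Println(extractRecordName("_acme-challenge.www.example.com.", "example.com")) // _acme-challenge.www
	fmt.Println(extractRecordName("_acme-challenge.example.com.", "example.com"))     // _acme-challenge
}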


@ -5,13 +5,11 @@ import (
"errors"
"fmt"
"net/http"
"strings"
"time"
"github.com/go-acme/lego/challenge/dns01"
"github.com/go-acme/lego/platform/config/env"
"github.com/sacloud/libsacloud/api"
"github.com/sacloud/libsacloud/sacloud"
)
// Config is used to configure the creation of the DNSProvider
@ -21,6 +19,7 @@ type Config struct {
PropagationTimeout time.Duration
PollingInterval time.Duration
TTL int
HTTPClient *http.Client
}
// NewDefaultConfig returns a default configuration for the DNSProvider
@ -29,13 +28,16 @@ func NewDefaultConfig() *Config {
TTL: env.GetOrDefaultInt("SAKURACLOUD_TTL", dns01.DefaultTTL),
PropagationTimeout: env.GetOrDefaultSecond("SAKURACLOUD_PROPAGATION_TIMEOUT", dns01.DefaultPropagationTimeout),
PollingInterval: env.GetOrDefaultSecond("SAKURACLOUD_POLLING_INTERVAL", dns01.DefaultPollingInterval),
HTTPClient: &http.Client{
Timeout: env.GetOrDefaultSecond("SAKURACLOUD_HTTP_TIMEOUT", 10*time.Second),
},
}
}
// DNSProvider is an implementation of the acme.ChallengeProvider interface.
type DNSProvider struct {
config *Config
client *api.Client
client *api.DNSAPI
}
// NewDNSProvider returns a DNSProvider instance configured for SakuraCloud.
@ -67,58 +69,29 @@ func NewDNSProviderConfig(config *Config) (*DNSProvider, error) {
return nil, errors.New("sakuracloud: AccessSecret is missing")
}
client := api.NewClient(config.Token, config.Secret, "tk1a")
apiClient := api.NewClient(config.Token, config.Secret, "is1a")
if config.HTTPClient == nil {
apiClient.HTTPClient = http.DefaultClient
} else {
apiClient.HTTPClient = config.HTTPClient
}
return &DNSProvider{client: client, config: config}, nil
return &DNSProvider{
client: apiClient.GetDNSAPI(),
config: config,
}, nil
}
// Present creates a TXT record to fulfill the dns-01 challenge.
func (d *DNSProvider) Present(domain, token, keyAuth string) error {
fqdn, value := dns01.GetRecord(domain, keyAuth)
zone, err := d.getHostedZone(domain)
if err != nil {
return fmt.Errorf("sakuracloud: %v", err)
}
name := d.extractRecordName(fqdn, zone.Name)
zone.AddRecord(zone.CreateNewRecord(name, "TXT", value, d.config.TTL))
_, err = d.client.GetDNSAPI().Update(zone.ID, zone)
if err != nil {
return fmt.Errorf("sakuracloud: API call failed: %v", err)
}
return nil
return d.addTXTRecord(fqdn, domain, value, d.config.TTL)
}
// CleanUp removes the TXT record matching the specified parameters.
func (d *DNSProvider) CleanUp(domain, token, keyAuth string) error {
fqdn, _ := dns01.GetRecord(domain, keyAuth)
zone, err := d.getHostedZone(domain)
if err != nil {
return fmt.Errorf("sakuracloud: %v", err)
}
records := d.findTxtRecords(fqdn, zone)
for _, record := range records {
var updRecords []sacloud.DNSRecordSet
for _, r := range zone.Settings.DNS.ResourceRecordSets {
if !(r.Name == record.Name && r.Type == record.Type && r.RData == record.RData) {
updRecords = append(updRecords, r)
}
}
zone.Settings.DNS.ResourceRecordSets = updRecords
}
_, err = d.client.GetDNSAPI().Update(zone.ID, zone)
if err != nil {
return fmt.Errorf("sakuracloud: API call failed: %v", err)
}
return nil
return d.cleanupTXTRecord(fqdn, domain)
}
// Timeout returns the timeout and interval to use when checking for DNS propagation.
@ -126,48 +99,3 @@ func (d *DNSProvider) CleanUp(domain, token, keyAuth string) error {
func (d *DNSProvider) Timeout() (timeout, interval time.Duration) {
return d.config.PropagationTimeout, d.config.PollingInterval
}
func (d *DNSProvider) getHostedZone(domain string) (*sacloud.DNS, error) {
authZone, err := dns01.FindZoneByFqdn(dns01.ToFqdn(domain))
if err != nil {
return nil, err
}
zoneName := dns01.UnFqdn(authZone)
res, err := d.client.GetDNSAPI().WithNameLike(zoneName).Find()
if err != nil {
if notFound, ok := err.(api.Error); ok && notFound.ResponseCode() == http.StatusNotFound {
return nil, fmt.Errorf("zone %s not found on SakuraCloud DNS: %v", zoneName, err)
}
return nil, fmt.Errorf("API call failed: %v", err)
}
for _, zone := range res.CommonServiceDNSItems {
if zone.Name == zoneName {
return &zone, nil
}
}
return nil, fmt.Errorf("zone %s not found", zoneName)
}
func (d *DNSProvider) findTxtRecords(fqdn string, zone *sacloud.DNS) []sacloud.DNSRecordSet {
recordName := d.extractRecordName(fqdn, zone.Name)
var res []sacloud.DNSRecordSet
for _, record := range zone.Settings.DNS.ResourceRecordSets {
if record.Name == recordName && record.Type == "TXT" {
res = append(res, record)
}
}
return res
}
func (d *DNSProvider) extractRecordName(fqdn, domain string) string {
name := dns01.UnFqdn(fqdn)
if idx := strings.Index(name, "."+domain); idx != -1 {
return name[:idx]
}
return name
}


@ -2,10 +2,11 @@ package api
import (
"fmt"
"github.com/sacloud/libsacloud/sacloud"
"github.com/sacloud/libsacloud/sacloud/ostype"
"strings"
"time"
"github.com/sacloud/libsacloud/sacloud"
"github.com/sacloud/libsacloud/sacloud/ostype"
)
// ArchiveAPI アーカイブAPI
@ -15,25 +16,30 @@ type ArchiveAPI struct {
}
var (
archiveLatestStableCentOSTags = []string{"current-stable", "distro-centos"}
archiveLatestStableCentOS6Tags = []string{"distro-centos", "distro-ver-6.9"}
archiveLatestStableUbuntuTags = []string{"current-stable", "distro-ubuntu"}
archiveLatestStableDebianTags = []string{"current-stable", "distro-debian"}
archiveLatestStableVyOSTags = []string{"current-stable", "distro-vyos"}
archiveLatestStableCoreOSTags = []string{"current-stable", "distro-coreos"}
archiveLatestStableRancherOSTags = []string{"current-stable", "distro-rancheros"}
archiveLatestStableKusanagiTags = []string{"current-stable", "pkg-kusanagi"}
archiveLatestStableSophosUTMTags = []string{"current-stable", "pkg-sophosutm"}
archiveLatestStableFreeBSDTags = []string{"current-stable", "distro-freebsd"}
archiveLatestStableWindows2012Tags = []string{"os-windows", "distro-ver-2012.2"}
archiveLatestStableWindows2012RDSTags = []string{"os-windows", "distro-ver-2012.2", "windows-rds"}
archiveLatestStableWindows2012RDSOfficeTags = []string{"os-windows", "distro-ver-2012.2", "windows-rds", "with-office"}
archiveLatestStableWindows2016Tags = []string{"os-windows", "distro-ver-2016"}
archiveLatestStableWindows2016RDSTags = []string{"os-windows", "distro-ver-2016", "windows-rds"}
archiveLatestStableWindows2016RDSOfficeTags = []string{"os-windows", "distro-ver-2016", "windows-rds", "with-office"}
archiveLatestStableWindows2016SQLServerWeb = []string{"os-windows", "distro-ver-2016", "windows-sqlserver", "sqlserver-2016", "edition-web"}
archiveLatestStableWindows2016SQLServerStandard = []string{"os-windows", "distro-ver-2016", "windows-sqlserver", "sqlserver-2016", "edition-standard"}
archiveLatestStableWindows2016SQLServerStandardAll = []string{"os-windows", "distro-ver-2016", "windows-sqlserver", "sqlserver-2016", "edition-standard", "windows-rds", "with-office"}
archiveLatestStableCentOSTags = []string{"current-stable", "distro-centos"}
archiveLatestStableCentOS6Tags = []string{"distro-centos", "distro-ver-6.10"}
archiveLatestStableUbuntuTags = []string{"current-stable", "distro-ubuntu"}
archiveLatestStableDebianTags = []string{"current-stable", "distro-debian"}
archiveLatestStableVyOSTags = []string{"current-stable", "distro-vyos"}
archiveLatestStableCoreOSTags = []string{"current-stable", "distro-coreos"}
archiveLatestStableRancherOSTags = []string{"current-stable", "distro-rancheros"}
archiveLatestStableKusanagiTags = []string{"current-stable", "pkg-kusanagi"}
archiveLatestStableSophosUTMTags = []string{"current-stable", "pkg-sophosutm"}
archiveLatestStableFreeBSDTags = []string{"current-stable", "distro-freebsd"}
archiveLatestStableNetwiserTags = []string{"current-stable", "pkg-netwiserve"}
archiveLatestStableOPNsenseTags = []string{"current-stable", "distro-opnsense"}
archiveLatestStableWindows2012Tags = []string{"os-windows", "distro-ver-2012.2"}
archiveLatestStableWindows2012RDSTags = []string{"os-windows", "distro-ver-2012.2", "windows-rds"}
archiveLatestStableWindows2012RDSOfficeTags = []string{"os-windows", "distro-ver-2012.2", "windows-rds", "with-office"}
archiveLatestStableWindows2016Tags = []string{"os-windows", "distro-ver-2016"}
archiveLatestStableWindows2016RDSTags = []string{"os-windows", "distro-ver-2016", "windows-rds"}
archiveLatestStableWindows2016RDSOfficeTags = []string{"os-windows", "distro-ver-2016", "windows-rds", "with-office"}
archiveLatestStableWindows2016SQLServerWeb = []string{"os-windows", "distro-ver-2016", "windows-sqlserver", "sqlserver-2016", "edition-web"}
archiveLatestStableWindows2016SQLServerStandard = []string{"os-windows", "distro-ver-2016", "windows-sqlserver", "sqlserver-2016", "edition-standard"}
archiveLatestStableWindows2016SQLServer2017Standard = []string{"os-windows", "distro-ver-2016", "windows-sqlserver", "sqlserver-2017", "edition-standard"}
archiveLatestStableWindows2016SQLServerStandardAll = []string{"os-windows", "distro-ver-2016", "windows-sqlserver", "sqlserver-2016", "edition-standard", "windows-rds", "with-office"}
archiveLatestStableWindows2016SQLServer2017StandardAll = []string{"os-windows", "distro-ver-2016", "windows-sqlserver", "sqlserver-2017", "edition-standard", "windows-rds", "with-office"}
archiveLatestStableWindows2019Tags = []string{"os-windows", "distro-ver-2019"}
)
// NewArchiveAPI アーカイブAPI作成
@ -48,25 +54,29 @@ func NewArchiveAPI(client *Client) *ArchiveAPI {
}
api.findFuncMapPerOSType = map[ostype.ArchiveOSTypes]func() (*sacloud.Archive, error){
ostype.CentOS: api.FindLatestStableCentOS,
ostype.CentOS6: api.FindLatestStableCentOS6,
ostype.Ubuntu: api.FindLatestStableUbuntu,
ostype.Debian: api.FindLatestStableDebian,
ostype.VyOS: api.FindLatestStableVyOS,
ostype.CoreOS: api.FindLatestStableCoreOS,
ostype.RancherOS: api.FindLatestStableRancherOS,
ostype.Kusanagi: api.FindLatestStableKusanagi,
ostype.SophosUTM: api.FindLatestStableSophosUTM,
ostype.FreeBSD: api.FindLatestStableFreeBSD,
ostype.Windows2012: api.FindLatestStableWindows2012,
ostype.Windows2012RDS: api.FindLatestStableWindows2012RDS,
ostype.Windows2012RDSOffice: api.FindLatestStableWindows2012RDSOffice,
ostype.Windows2016: api.FindLatestStableWindows2016,
ostype.Windows2016RDS: api.FindLatestStableWindows2016RDS,
ostype.Windows2016RDSOffice: api.FindLatestStableWindows2016RDSOffice,
ostype.Windows2016SQLServerWeb: api.FindLatestStableWindows2016SQLServerWeb,
ostype.Windows2016SQLServerStandard: api.FindLatestStableWindows2016SQLServerStandard,
ostype.Windows2016SQLServerStandardAll: api.FindLatestStableWindows2016SQLServerStandardAll,
ostype.CentOS: api.FindLatestStableCentOS,
ostype.CentOS6: api.FindLatestStableCentOS6,
ostype.Ubuntu: api.FindLatestStableUbuntu,
ostype.Debian: api.FindLatestStableDebian,
ostype.VyOS: api.FindLatestStableVyOS,
ostype.CoreOS: api.FindLatestStableCoreOS,
ostype.RancherOS: api.FindLatestStableRancherOS,
ostype.Kusanagi: api.FindLatestStableKusanagi,
ostype.SophosUTM: api.FindLatestStableSophosUTM,
ostype.FreeBSD: api.FindLatestStableFreeBSD,
ostype.Netwiser: api.FindLatestStableNetwiser,
ostype.OPNsense: api.FindLatestStableOPNsense,
ostype.Windows2012: api.FindLatestStableWindows2012,
ostype.Windows2012RDS: api.FindLatestStableWindows2012RDS,
ostype.Windows2012RDSOffice: api.FindLatestStableWindows2012RDSOffice,
ostype.Windows2016: api.FindLatestStableWindows2016,
ostype.Windows2016RDS: api.FindLatestStableWindows2016RDS,
ostype.Windows2016RDSOffice: api.FindLatestStableWindows2016RDSOffice,
ostype.Windows2016SQLServerWeb: api.FindLatestStableWindows2016SQLServerWeb,
ostype.Windows2016SQLServerStandard: api.FindLatestStableWindows2016SQLServerStandard,
ostype.Windows2016SQLServer2017Standard: api.FindLatestStableWindows2016SQLServer2017Standard,
ostype.Windows2016SQLServerStandardAll: api.FindLatestStableWindows2016SQLServerStandardAll,
ostype.Windows2016SQLServer2017StandardAll: api.FindLatestStableWindows2016SQLServer2017StandardAll,
}
return api
@ -137,6 +147,14 @@ func (api *ArchiveAPI) CanEditDisk(id int64) (bool, error) {
if archive.HasTag("pkg-sophosutm") || archive.IsSophosUTM() {
return false, nil
}
// OPNsenseであれば編集不可
if archive.HasTag("distro-opnsense") {
return false, nil
}
// Netwiser VEであれば編集不可
if archive.HasTag("pkg-netwiserve") {
return false, nil
}
for _, t := range allowDiskEditTags {
if archive.HasTag(t) {
@ -180,6 +198,14 @@ func (api *ArchiveAPI) GetPublicArchiveIDFromAncestors(id int64) (int64, bool) {
if archive.HasTag("pkg-sophosutm") || archive.IsSophosUTM() {
return emptyID, false
}
// OPNsenseであれば編集不可
if archive.HasTag("distro-opnsense") {
return emptyID, false
}
// Netwiser VEであれば編集不可
if archive.HasTag("pkg-netwiserve") {
return emptyID, false
}
for _, t := range allowDiskEditTags {
if archive.HasTag(t) {
@ -249,6 +275,16 @@ func (api *ArchiveAPI) FindLatestStableFreeBSD() (*sacloud.Archive, error) {
return api.findByOSTags(archiveLatestStableFreeBSDTags)
}
// FindLatestStableNetwiser 安定版最新のNetwiserパブリックアーカイブを取得
func (api *ArchiveAPI) FindLatestStableNetwiser() (*sacloud.Archive, error) {
return api.findByOSTags(archiveLatestStableNetwiserTags)
}
// FindLatestStableOPNsense 安定版最新のOPNsenseパブリックアーカイブを取得
func (api *ArchiveAPI) FindLatestStableOPNsense() (*sacloud.Archive, error) {
return api.findByOSTags(archiveLatestStableOPNsenseTags)
}
// FindLatestStableWindows2012 安定版最新のWindows2012パブリックアーカイブを取得
func (api *ArchiveAPI) FindLatestStableWindows2012() (*sacloud.Archive, error) {
return api.findByOSTags(archiveLatestStableWindows2012Tags, map[string]interface{}{
@ -305,13 +341,34 @@ func (api *ArchiveAPI) FindLatestStableWindows2016SQLServerStandard() (*sacloud.
})
}
// FindLatestStableWindows2016SQLServer2017Standard 安定版最新のWindows2016 SQLServer2017(Standard) パブリックアーカイブを取得
func (api *ArchiveAPI) FindLatestStableWindows2016SQLServer2017Standard() (*sacloud.Archive, error) {
return api.findByOSTags(archiveLatestStableWindows2016SQLServer2017Standard, map[string]interface{}{
"Name": "Windows Server 2016 for MS SQL 2017(Standard)",
})
}
// FindLatestStableWindows2016SQLServerStandardAll 安定版最新のWindows2016 SQLServer(RDS+Office) パブリックアーカイブを取得
func (api *ArchiveAPI) FindLatestStableWindows2016SQLServerStandardAll() (*sacloud.Archive, error) {
return api.findByOSTags(archiveLatestStableWindows2016SQLServerStandard, map[string]interface{}{
return api.findByOSTags(archiveLatestStableWindows2016SQLServerStandardAll, map[string]interface{}{
"Name": "Windows Server 2016 for MS SQL 2016(Std) with RDS / MS Office",
})
}
// FindLatestStableWindows2016SQLServer2017StandardAll 安定版最新のWindows2016 SQLServer2017(RDS+Office) パブリックアーカイブを取得
func (api *ArchiveAPI) FindLatestStableWindows2016SQLServer2017StandardAll() (*sacloud.Archive, error) {
return api.findByOSTags(archiveLatestStableWindows2016SQLServer2017StandardAll, map[string]interface{}{
"Name": "Windows Server 2016 for MS SQL 2017(Std) with RDS / MS Office",
})
}
// FindLatestStableWindows2019 安定版最新のWindows2019パブリックアーカイブを取得
func (api *ArchiveAPI) FindLatestStableWindows2019() (*sacloud.Archive, error) {
return api.findByOSTags(archiveLatestStableWindows2019Tags, map[string]interface{}{
"Name": "Windows Server 2019 Datacenter Edition",
})
}
// FindByOSType 指定のOS種別の安定版最新のパブリックアーカイブを取得
func (api *ArchiveAPI) FindByOSType(os ostype.ArchiveOSTypes) (*sacloud.Archive, error) {
if f, ok := api.findFuncMapPerOSType[os]; ok {


@ -2,6 +2,7 @@ package api
import (
"encoding/json"
"github.com/sacloud/libsacloud/sacloud"
)


@ -1,8 +1,8 @@
package api
import (
"encoding/json"
// "strings"
"encoding/json" // "strings"
"github.com/sacloud/libsacloud/sacloud"
)


@ -3,8 +3,9 @@ package api
import (
"encoding/json"
"fmt"
"github.com/sacloud/libsacloud/sacloud"
"net/url"
"github.com/sacloud/libsacloud/sacloud"
)
type baseAPI struct {
@ -137,7 +138,7 @@ func (api *baseAPI) filterBy(key string, value interface{}, multiple bool) *base
if f, ok := state.Filter[key]; ok {
if s, ok := f.(string); ok && s != "" {
if v, ok := value.(string); ok {
state.Filter[key] = fmt.Sprintf("%s %s", s, v)
state.Filter[key] = fmt.Sprintf("%s%%20%s", s, v)
return
}
}


@ -4,10 +4,11 @@ import (
"encoding/csv"
"encoding/json"
"fmt"
"github.com/sacloud/libsacloud/sacloud"
"io"
"strings"
"time"
"github.com/sacloud/libsacloud/sacloud"
)
// BillAPI 請求情報API


@ -2,8 +2,9 @@ package api
import (
"fmt"
"github.com/sacloud/libsacloud/sacloud"
"time"
"github.com/sacloud/libsacloud/sacloud"
)
// CDROMAPI ISOイメージAPI


@ -4,14 +4,15 @@ import (
"bytes"
"encoding/json"
"fmt"
"github.com/sacloud/libsacloud"
"github.com/sacloud/libsacloud/sacloud"
"io"
"io/ioutil"
"log"
"net/http"
"strings"
"time"
"github.com/sacloud/libsacloud"
"github.com/sacloud/libsacloud/sacloud"
)
var (
@ -44,6 +45,8 @@ type Client struct {
RetryMax int
// 503エラー時のリトライ待ち時間
RetryInterval time.Duration
// APIコール時に利用される*http.Client 未指定の場合http.DefaultClientが利用される
HTTPClient *http.Client
}
// NewClient APIクライアント作成
@ -73,8 +76,11 @@ func (c *Client) Clone() *Client {
DefaultTimeoutDuration: c.DefaultTimeoutDuration,
UserAgent: c.UserAgent,
AcceptLanguage: c.AcceptLanguage,
RequestTracer: c.RequestTracer,
ResponseTracer: c.ResponseTracer,
RetryMax: c.RetryMax,
RetryInterval: c.RetryInterval,
HTTPClient: c.HTTPClient,
}
n.API = newAPI(n)
return n
@ -111,6 +117,7 @@ func (c *Client) isOkStatus(code int) bool {
func (c *Client) newRequest(method, uri string, body interface{}) ([]byte, error) {
var (
client = &retryableHTTPClient{
Client: c.HTTPClient,
retryMax: c.RetryMax,
retryInterval: c.RetryInterval,
}
@ -232,12 +239,15 @@ func newRequest(method, url string, body io.ReadSeeker) (*request, error) {
}
type retryableHTTPClient struct {
http.Client
*http.Client
retryInterval time.Duration
retryMax int
}
func (c *retryableHTTPClient) Do(req *request) (*http.Response, error) {
if c.Client == nil {
c.Client = http.DefaultClient
}
for i := 0; ; i++ {
if req.body != nil {
@ -277,6 +287,7 @@ type API struct {
Bill *BillAPI // 請求情報API
Bridge *BridgeAPI // ブリッジAPi
CDROM *CDROMAPI // ISOイメージAPI
Coupon *CouponAPI // クーポンAPI
Database *DatabaseAPI // データベースAPI
Disk *DiskAPI // ディスクAPI
DNS *DNSAPI // DNS API
@ -295,6 +306,7 @@ type API struct {
NFS *NFSAPI // NFS API
Note *NoteAPI // スタートアップスクリプトAPI
PacketFilter *PacketFilterAPI // パケットフィルタAPI
ProxyLB *ProxyLBAPI // プロキシLBAPI
PrivateHost *PrivateHostAPI // 専有ホストAPI
Product *ProductAPI // 製品情報API
Server *ServerAPI // サーバーAPI
@ -337,6 +349,11 @@ func (api *API) GetCDROMAPI() *CDROMAPI {
return api.CDROM
}
// GetCouponAPI クーポン情報API取得
func (api *API) GetCouponAPI() *CouponAPI {
return api.Coupon
}
// GetDatabaseAPI データベースAPI取得
func (api *API) GetDatabaseAPI() *DatabaseAPI {
return api.Database
@ -432,6 +449,11 @@ func (api *API) GetPacketFilterAPI() *PacketFilterAPI {
return api.PacketFilter
}
// GetProxyLBAPI プロキシLBAPI取得
func (api *API) GetProxyLBAPI() *ProxyLBAPI {
return api.ProxyLB
}
// GetPrivateHostAPI 専有ホストAPI取得
func (api *API) GetPrivateHostAPI() *PrivateHostAPI {
return api.PrivateHost
@ -566,6 +588,7 @@ func newAPI(client *Client) *API {
Bill: NewBillAPI(client),
Bridge: NewBridgeAPI(client),
CDROM: NewCDROMAPI(client),
Coupon: NewCouponAPI(client),
Database: NewDatabaseAPI(client),
Disk: NewDiskAPI(client),
DNS: NewDNSAPI(client),
@ -587,6 +610,7 @@ func newAPI(client *Client) *API {
NFS: NewNFSAPI(client),
Note: NewNoteAPI(client),
PacketFilter: NewPacketFilterAPI(client),
ProxyLB: NewProxyLBAPI(client),
PrivateHost: NewPrivateHostAPI(client),
Product: &ProductAPI{
Server: NewProductServerAPI(client),

vendor/github.com/sacloud/libsacloud/api/coupon.go (generated, vendored, new file)

@ -0,0 +1,59 @@
package api
import (
"encoding/json"
"fmt"
"github.com/sacloud/libsacloud/sacloud"
)
// CouponAPI クーポン情報API
type CouponAPI struct {
*baseAPI
}
// NewCouponAPI クーポン情報API作成
func NewCouponAPI(client *Client) *CouponAPI {
return &CouponAPI{
&baseAPI{
client: client,
apiRootSuffix: sakuraBillingAPIRootSuffix,
FuncGetResourceURL: func() string {
return "coupon"
},
},
}
}
// CouponResponse クーポン情報レスポンス
type CouponResponse struct {
*sacloud.ResultFlagValue
// AllCount 件数
AllCount int `json:",omitempty"`
// CountPerPage ページあたり件数
CountPerPage int `json:",omitempty"`
// Page 現在のページ番号
Page int `json:",omitempty"`
// Coupons クーポン情報 リスト
Coupons []*sacloud.Coupon
}
// Find クーポン情報 全件取得
func (api *CouponAPI) Find() ([]*sacloud.Coupon, error) {
authStatus, err := api.client.AuthStatus.Read()
if err != nil {
return nil, err
}
accountID := authStatus.Account.GetStrID()
uri := fmt.Sprintf("%s/%s", api.getResourceURL(), accountID)
data, err := api.client.newRequest("GET", uri, nil)
if err != nil {
return nil, err
}
var res CouponResponse
if err := json.Unmarshal(data, &res); err != nil {
return nil, err
}
return res.Coupons, nil
}
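Aside (not part of the patch): a usage sketch of the new CouponAPI. The credentials and zone are placeholders, and calling GetCouponAPI() directly on the client assumes the getter added to client.go above is promoted on *api.Client like the other getters.

package main

import (
	"fmt"
	"log"

	"github.com/sacloud/libsacloud/api"
)

func main() {
	// Placeholder credentials and zone; replace with real values.
	client := api.NewClient("YOUR_TOKEN", "YOUR_SECRET", "is1a")

	coupons, err := client.GetCouponAPI().Find()
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range coupons {
		fmt.Printf("%+v\n", c)
	}
}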


@ -3,8 +3,9 @@ package api
import (
"encoding/json"
"fmt"
"github.com/sacloud/libsacloud/sacloud"
"time"
"github.com/sacloud/libsacloud/sacloud"
)
//HACK: さくらのAPI側仕様: Applianceの内容によってJSONフォーマットが異なるため


@ -2,8 +2,9 @@ package api
import (
"fmt"
"github.com/sacloud/libsacloud/sacloud"
"time"
"github.com/sacloud/libsacloud/sacloud"
)
var (
@ -56,7 +57,50 @@ func (api *DiskAPI) Create(value *sacloud.Disk) (*sacloud.Disk, error) {
Success string `json:",omitempty"`
}
res := &diskResponse{}
err := api.create(api.createRequest(value), res)
rawBody := &sacloud.Request{}
rawBody.Disk = value
if len(value.DistantFrom) > 0 {
rawBody.DistantFrom = value.DistantFrom
value.DistantFrom = []int64{}
}
err := api.create(rawBody, res)
if err != nil {
return nil, err
}
return res.Disk, nil
}
// CreateWithConfig ディスク作成とディスクの修正、サーバ起動(指定されていれば)を1回のAPI呼び出しで実行
func (api *DiskAPI) CreateWithConfig(value *sacloud.Disk, config *sacloud.DiskEditValue, bootAtAvailable bool) (*sacloud.Disk, error) {
//HACK: さくらのAPI側仕様: 戻り値:Successがbool値へ変換できないため文字列で受ける("Accepted"などが返る)
type diskResponse struct {
*sacloud.Response
// Success
Success string `json:",omitempty"`
}
res := &diskResponse{}
type diskRequest struct {
*sacloud.Request
Config *sacloud.DiskEditValue `json:",omitempty"`
BootAtAvailable bool `json:",omitempty"`
}
rawBody := &diskRequest{
Request: &sacloud.Request{},
BootAtAvailable: bootAtAvailable,
}
rawBody.Disk = value
rawBody.Config = config
if len(value.DistantFrom) > 0 {
rawBody.DistantFrom = value.DistantFrom
value.DistantFrom = []int64{}
}
err := api.create(rawBody, res)
if err != nil {
return nil, err
}
@ -90,7 +134,14 @@ func (api *DiskAPI) install(id int64, body *sacloud.Disk) (bool, error) {
Success string `json:",omitempty"`
}
res := &diskResponse{}
err := api.baseAPI.request(method, uri, body, res)
rawBody := &sacloud.Request{}
rawBody.Disk = body
if len(body.DistantFrom) > 0 {
rawBody.DistantFrom = body.DistantFrom
body.DistantFrom = []int64{}
}
err := api.baseAPI.request(method, uri, rawBody, res)
if err != nil {
return false, err
}
@ -213,6 +264,14 @@ func (api *DiskAPI) CanEditDisk(id int64) (bool, error) {
if disk.HasTag("pkg-sophosutm") || disk.IsSophosUTM() {
return false, nil
}
// OPNsenseであれば編集不可
if disk.HasTag("distro-opnsense") {
return false, nil
}
// Netwiser VEであれば編集不可
if disk.HasTag("pkg-netwiserve") {
return false, nil
}
// ソースアーカイブ/ソースディスクともに持っていない場合
if disk.SourceArchive == nil && disk.SourceDisk == nil {
@ -263,6 +322,14 @@ func (api *DiskAPI) GetPublicArchiveIDFromAncestors(id int64) (int64, bool) {
if disk.HasTag("pkg-sophosutm") || disk.IsSophosUTM() {
return emptyID, false
}
// OPNsenseであれば編集不可
if disk.HasTag("distro-opnsense") {
return emptyID, false
}
// Netwiser VEであれば編集不可
if disk.HasTag("pkg-netwiserve") {
return emptyID, false
}
for _, t := range allowDiskEditTags {
if disk.HasTag(t) {


@ -2,8 +2,9 @@ package api
import (
"encoding/json"
"github.com/sacloud/libsacloud/sacloud"
"strings"
"github.com/sacloud/libsacloud/sacloud"
)
//HACK: さくらのAPI側仕様: CommonServiceItemsの内容によってJSONフォーマットが異なるため


@ -2,6 +2,7 @@ package api
import (
"fmt"
"github.com/sacloud/libsacloud/sacloud"
)


@ -1,8 +1,8 @@
package api
import (
"encoding/json"
// "strings"
"encoding/json" // "strings"
"github.com/sacloud/libsacloud/sacloud"
)


@ -2,6 +2,7 @@ package api
import (
"fmt"
"github.com/sacloud/libsacloud/sacloud"
)
@ -81,3 +82,26 @@ func (api *InterfaceAPI) DisconnectFromPacketFilter(interfaceID int64) (bool, er
)
return api.modify(method, uri, nil)
}
// SetDisplayIPAddress 表示用IPアドレス 設定
func (api *InterfaceAPI) SetDisplayIPAddress(interfaceID int64, ipaddress string) (bool, error) {
var (
method = "PUT"
uri = fmt.Sprintf("/%s/%d", api.getResourceURL(), interfaceID)
)
body := map[string]interface{}{
"Interface": map[string]string{
"UserIPAddress": ipaddress,
},
}
return api.modify(method, uri, body)
}
// DeleteDisplayIPAddress 表示用IPアドレス 削除
func (api *InterfaceAPI) DeleteDisplayIPAddress(interfaceID int64) (bool, error) {
var (
method = "DELETE"
uri = fmt.Sprintf("/%s/%d", api.getResourceURL(), interfaceID)
)
return api.modify(method, uri, nil)
}


@ -2,8 +2,9 @@ package api
import (
"fmt"
"github.com/sacloud/libsacloud/sacloud"
"time"
"github.com/sacloud/libsacloud/sacloud"
)
// InternetAPI ルーターAPI


@ -2,6 +2,7 @@ package api
import (
"fmt"
"github.com/sacloud/libsacloud/sacloud"
)


@ -2,6 +2,7 @@ package api
import (
"fmt"
"github.com/sacloud/libsacloud/sacloud"
)


@ -3,8 +3,9 @@ package api
import (
"encoding/json"
"fmt"
"github.com/sacloud/libsacloud/sacloud"
"time"
"github.com/sacloud/libsacloud/sacloud"
)
//HACK: さくらのAPI側仕様: Applianceの内容によってJSONフォーマットが異なるため
@ -38,6 +39,12 @@ type loadBalancerResponse struct {
Success interface{} `json:",omitempty"` //HACK: さくらのAPI側仕様: 戻り値:Successがbool値へ変換できないためinterface{}
}
type loadBalancerStatusResponse struct {
*sacloud.ResultFlagValue
Success interface{} `json:",omitempty"` //HACK: さくらのAPI側仕様: 戻り値:Successがbool値へ変換できないためinterface{}
LoadBalancer *sacloud.LoadBalancerStatusResult `json:",omitempty"`
}
// LoadBalancerAPI ロードバランサーAPI
type LoadBalancerAPI struct {
*baseAPI
@ -230,3 +237,20 @@ func (api *LoadBalancerAPI) AsyncSleepWhileCopying(id int64, timeout time.Durati
func (api *LoadBalancerAPI) Monitor(id int64, body *sacloud.ResourceMonitorRequest) (*sacloud.MonitorValues, error) {
return api.baseAPI.applianceMonitorBy(id, "interface", 0, body)
}
// Status ステータス取得
func (api *LoadBalancerAPI) Status(id int64) (*sacloud.LoadBalancerStatusResult, error) {
var (
method = "GET"
uri = fmt.Sprintf("%s/%d/status", api.getResourceURL(), id)
res = &loadBalancerStatusResponse{}
)
err := api.baseAPI.request(method, uri, nil, res)
if err != nil {
return nil, err
}
if res.LoadBalancer == nil {
return nil, nil
}
return res.LoadBalancer, nil
}


@ -3,8 +3,9 @@ package api
import (
"encoding/json"
"fmt"
"github.com/sacloud/libsacloud/sacloud"
"time"
"github.com/sacloud/libsacloud/sacloud"
)
// SearchMobileGatewayResponse モバイルゲートウェイ検索レスポンス
@ -41,6 +42,14 @@ type mobileGatewaySIMResponse struct {
Success interface{} `json:",omitempty"` //HACK: さくらのAPI側仕様: 戻り値:Successがbool値へ変換できないためinterface{}
}
type trafficMonitoringBody struct {
TrafficMonitoring *sacloud.TrafficMonitoringConfig `json:"traffic_monitoring_config"`
}
type trafficStatusBody struct {
TrafficStatus *sacloud.TrafficStatus `json:"traffic_status"`
}
// MobileGatewayAPI モバイルゲートウェイAPI
type MobileGatewayAPI struct {
*baseAPI
@ -322,8 +331,8 @@ func (api *MobileGatewayAPI) AddSIMRoute(id int64, simID int64, prefix string) (
param := &sacloud.MobileGatewaySIMRoutes{
SIMRoutes: routes,
}
added := param.AddSIMRoute(simID, prefix)
if !added {
index, added := param.AddSIMRoute(simID, prefix)
if index < 0 || added == nil {
return false, nil
}
@ -412,3 +421,60 @@ func (api *MobileGatewayAPI) Logs(id int64, body interface{}) ([]sacloud.SIMLog,
}
return res.Logs, nil
}
// GetTrafficMonitoringConfig トラフィックコントロール 取得
func (api *MobileGatewayAPI) GetTrafficMonitoringConfig(id int64) (*sacloud.TrafficMonitoringConfig, error) {
var (
method = "GET"
uri = fmt.Sprintf("%s/%d/mobilegateway/traffic_monitoring", api.getResourceURL(), id)
)
res := &trafficMonitoringBody{}
err := api.baseAPI.request(method, uri, nil, res)
if err != nil {
return nil, err
}
return res.TrafficMonitoring, nil
}
// SetTrafficMonitoringConfig トラフィックコントロール 設定
func (api *MobileGatewayAPI) SetTrafficMonitoringConfig(id int64, trafficMonConfig *sacloud.TrafficMonitoringConfig) (bool, error) {
var (
method = "PUT"
uri = fmt.Sprintf("%s/%d/mobilegateway/traffic_monitoring", api.getResourceURL(), id)
)
req := &trafficMonitoringBody{
TrafficMonitoring: trafficMonConfig,
}
return api.modify(method, uri, req)
}
// DisableTrafficMonitoringConfig トラフィックコントロール 解除
func (api *MobileGatewayAPI) DisableTrafficMonitoringConfig(id int64) (bool, error) {
var (
method = "DELETE"
uri = fmt.Sprintf("%s/%d/mobilegateway/traffic_monitoring", api.getResourceURL(), id)
)
return api.modify(method, uri, nil)
}
// GetTrafficStatus 当月通信量 取得
func (api *MobileGatewayAPI) GetTrafficStatus(id int64) (*sacloud.TrafficStatus, error) {
var (
method = "GET"
uri = fmt.Sprintf("%s/%d/mobilegateway/traffic_status", api.getResourceURL(), id)
)
res := &trafficStatusBody{}
err := api.baseAPI.request(method, uri, nil, res)
if err != nil {
return nil, err
}
return res.TrafficStatus, nil
}
// MonitorBy 指定位置のインターフェースのアクティビティーモニター取得
func (api *MobileGatewayAPI) MonitorBy(id int64, nicIndex int, body *sacloud.ResourceMonitorRequest) (*sacloud.MonitorValues, error) {
return api.baseAPI.applianceMonitorBy(id, "interface", nicIndex, body)
}


@ -2,6 +2,7 @@ package api
import (
"encoding/json"
"github.com/sacloud/libsacloud/sacloud"
)


@ -2,9 +2,11 @@ package api
import (
"encoding/json"
"errors"
"fmt"
"github.com/sacloud/libsacloud/sacloud"
"time"
"github.com/sacloud/libsacloud/sacloud"
)
// SearchNFSResponse NFS検索レスポンス
@ -94,6 +96,58 @@ func (api *NFSAPI) Create(value *sacloud.NFS) (*sacloud.NFS, error) {
})
}
// CreateWithPlan プラン/サイズを指定してNFSを作成
func (api *NFSAPI) CreateWithPlan(value *sacloud.CreateNFSValue, plan sacloud.NFSPlan, size sacloud.NFSSize) (*sacloud.NFS, error) {
nfs := sacloud.NewNFS(value)
// get plan
plans, err := api.GetNFSPlans()
if err != nil {
return nil, err
}
if plans == nil {
return nil, errors.New("NFS plans not found")
}
planID := plans.FindPlanID(plan, size)
if planID < 0 {
return nil, errors.New("NFS plans not found")
}
nfs.Plan = sacloud.NewResource(planID)
nfs.Remark.SetRemarkPlanID(planID)
return api.request(func(res *nfsResponse) error {
return api.create(api.createRequest(nfs), res)
})
}
// GetNFSPlans プラン一覧取得
func (api *NFSAPI) GetNFSPlans() (*sacloud.NFSPlans, error) {
notes, err := api.client.Note.Reset().Find()
if err != nil {
return nil, err
}
for _, note := range notes.Notes {
if note.Class == sacloud.ENoteClass("json") && note.Name == "sys-nfs" {
rawPlans := note.Content
var plans struct {
Plans *sacloud.NFSPlans `json:"plans"`
}
err := json.Unmarshal([]byte(rawPlans), &plans)
if err != nil {
return nil, err
}
return plans.Plans, nil
}
}
return nil, nil
}
// Read 読み取り
func (api *NFSAPI) Read(id int64) (*sacloud.NFS, error) {
return api.request(func(res *nfsResponse) error {
@ -223,9 +277,9 @@ func (api *NFSAPI) AsyncSleepWhileCopying(id int64, timeout time.Duration, maxRe
return poll(handler, timeout)
}
// MonitorNFS NFS固有項目アクティビティモニター取得
func (api *NFSAPI) MonitorNFS(id int64, body *sacloud.ResourceMonitorRequest) (*sacloud.MonitorValues, error) {
return api.baseAPI.applianceMonitorBy(id, "nfs", 0, body)
// MonitorFreeDiskSize NFSディスク残量アクティビティモニター取得
func (api *NFSAPI) MonitorFreeDiskSize(id int64, body *sacloud.ResourceMonitorRequest) (*sacloud.MonitorValues, error) {
return api.baseAPI.applianceMonitorBy(id, "database", 0, body)
}
// MonitorInterface NICアクティビティーモニター取得


@ -21,16 +21,16 @@ func poll(handler pollingHandler, timeout time.Duration) (chan (interface{}), ch
select {
case <-tick:
exit, state, err := handler()
if state != nil {
progChan <- state
}
if err != nil {
errChan <- fmt.Errorf("Failed: poll: %s", err)
return
}
if state != nil {
progChan <- state
if exit {
compChan <- state
return
}
if exit {
compChan <- state
return
}
case <-bomb:
errChan <- fmt.Errorf("Timeout")
@ -65,9 +65,9 @@ type hasFailed interface {
func waitingForAvailableFunc(readFunc func() (hasAvailable, error), maxRetry int) func() (bool, interface{}, error) {
counter := 0
return func() (bool, interface{}, error) {
counter++
v, err := readFunc()
if err != nil {
counter++
if maxRetry > 0 && counter < maxRetry {
return false, nil, nil
}
@ -96,9 +96,9 @@ type hasUpDown interface {
func waitingForUpFunc(readFunc func() (hasUpDown, error), maxRetry int) func() (bool, interface{}, error) {
counter := 0
return func() (bool, interface{}, error) {
counter++
v, err := readFunc()
if err != nil {
counter++
if maxRetry > 0 && counter < maxRetry {
return false, nil, nil
}
@ -118,9 +118,9 @@ func waitingForUpFunc(readFunc func() (hasUpDown, error), maxRetry int) func() (
func waitingForDownFunc(readFunc func() (hasUpDown, error), maxRetry int) func() (bool, interface{}, error) {
counter := 0
return func() (bool, interface{}, error) {
counter++
v, err := readFunc()
if err != nil {
counter++
if maxRetry > 0 && counter < maxRetry {
return false, nil, nil
}
@ -140,9 +140,9 @@ func waitingForDownFunc(readFunc func() (hasUpDown, error), maxRetry int) func()
func waitingForReadFunc(readFunc func() (interface{}, error), maxRetry int) func() (bool, interface{}, error) {
counter := 0
return func() (bool, interface{}, error) {
counter++
v, err := readFunc()
if err != nil {
counter++
if maxRetry > 0 && counter < maxRetry {
return false, nil, nil
}


@ -2,8 +2,8 @@ package api
import (
"fmt"
"github.com/sacloud/libsacloud/sacloud"
"strconv"
)
// ProductServerAPI サーバープランAPI
@ -24,48 +24,50 @@ func NewProductServerAPI(client *Client) *ProductServerAPI {
}
}
func (api *ProductServerAPI) getPlanIDBySpec(core int, memGB int) (int64, error) {
//assert args
if core <= 0 {
return -1, fmt.Errorf("Invalid Parameter: CPU Core")
}
if memGB <= 0 {
return -1, fmt.Errorf("Invalid Parameter: Memory Size(GB)")
}
return strconv.ParseInt(fmt.Sprintf("%d%03d", memGB, core), 10, 64)
}
// IsValidPlan 指定のコア数/メモリサイズのプランが存在し、有効であるか判定
func (api *ProductServerAPI) IsValidPlan(core int, memGB int) (bool, error) {
planID, err := api.getPlanIDBySpec(core, memGB)
if err != nil {
return false, err
}
productServer, err := api.Read(planID)
if err != nil {
return false, err
}
if productServer != nil {
return true, nil
}
return false, fmt.Errorf("Server Plan[%d] Not Found", planID)
}
// GetBySpec 指定のコア数/メモリサイズのサーバープランを取得
func (api *ProductServerAPI) GetBySpec(core int, memGB int) (*sacloud.ProductServer, error) {
planID, err := api.getPlanIDBySpec(core, memGB)
productServer, err := api.Read(planID)
// GetBySpec 指定のコア数/メモリサイズ/世代のプランを取得
func (api *ProductServerAPI) GetBySpec(core int, memGB int, gen sacloud.PlanGenerations) (*sacloud.ProductServer, error) {
plans, err := api.Reset().Find()
if err != nil {
return nil, err
}
var res sacloud.ProductServer
var found bool
for _, plan := range plans.ServerPlans {
if plan.CPU == core && plan.GetMemoryGB() == memGB {
if gen == sacloud.PlanDefault || gen == plan.Generation {
// PlanDefaultの場合は複数ヒットしうる。
// この場合より新しい世代を優先する。
if found && plan.Generation <= res.Generation {
continue
}
res = plan
found = true
}
}
}
return productServer, nil
if !found {
return nil, fmt.Errorf("Server Plan[core:%d, memory:%d, gen:%d] is not found", core, memGB, gen)
}
return &res, nil
}
// IsValidPlan 指定のコア数/メモリサイズ/世代のプランが存在し、有効であるか判定
func (api *ProductServerAPI) IsValidPlan(core int, memGB int, gen sacloud.PlanGenerations) (bool, error) {
productServer, err := api.GetBySpec(core, memGB, gen)
if err != nil {
return false, err
}
if productServer == nil {
return false, fmt.Errorf("Server Plan[core:%d, memory:%d, gen:%d] is not found", core, memGB, gen)
}
if productServer.Availability != sacloud.EAAvailable {
return false, fmt.Errorf("Server Plan[core:%d, memory:%d, gen:%d] is not available", core, memGB, gen)
}
return true, nil
}
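Aside (not part of the patch): a usage sketch of the new GetBySpec signature. Credentials are placeholders, and reaching the sub-API via client.Product.Server assumes the wiring shown in newAPI above is exposed on *api.Client.

package main

import (
	"fmt"
	"log"

	"github.com/sacloud/libsacloud/api"
	"github.com/sacloud/libsacloud/sacloud"
)

func main() {
	// Placeholder credentials and zone; replace with real values.
	client := api.NewClient("YOUR_TOKEN", "YOUR_SECRET", "is1a")

	// Ask for a 2-core / 4GB plan; sacloud.PlanDefault lets the lookup prefer
	// the newest generation, per the selection loop above.
	plan, err := client.Product.Server.GetBySpec(2, 4, sacloud.PlanDefault)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", plan)
}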

vendor/github.com/sacloud/libsacloud/api/proxylb.go (generated, vendored, new file)

@ -0,0 +1,224 @@
package api
import (
"encoding/json" // "strings"
"fmt"
"github.com/sacloud/libsacloud/sacloud"
)
//HACK: さくらのAPI側仕様: CommonServiceItemsの内容によってJSONフォーマットが異なるため
// DNS/ProxyLB/シンプル監視それぞれでリクエスト/レスポンスデータ型を定義する。
// SearchProxyLBResponse ProxyLB検索レスポンス
type SearchProxyLBResponse struct {
// Total 総件数
Total int `json:",omitempty"`
// From ページング開始位置
From int `json:",omitempty"`
// Count 件数
Count int `json:",omitempty"`
// CommonServiceProxyLBItems ProxyLBリスト
CommonServiceProxyLBItems []sacloud.ProxyLB `json:"CommonServiceItems,omitempty"`
}
type proxyLBRequest struct {
CommonServiceProxyLBItem *sacloud.ProxyLB `json:"CommonServiceItem,omitempty"`
From int `json:",omitempty"`
Count int `json:",omitempty"`
Sort []string `json:",omitempty"`
Filter map[string]interface{} `json:",omitempty"`
Exclude []string `json:",omitempty"`
Include []string `json:",omitempty"`
}
type proxyLBResponse struct {
*sacloud.ResultFlagValue
*sacloud.ProxyLB `json:"CommonServiceItem,omitempty"`
}
// ProxyLBAPI ProxyLB API
type ProxyLBAPI struct {
*baseAPI
}
// NewProxyLBAPI ProxyLB API作成
func NewProxyLBAPI(client *Client) *ProxyLBAPI {
return &ProxyLBAPI{
&baseAPI{
client: client,
FuncGetResourceURL: func() string {
return "commonserviceitem"
},
FuncBaseSearchCondition: func() *sacloud.Request {
res := &sacloud.Request{}
res.AddFilter("Provider.Class", "proxylb")
return res
},
},
}
}
// Find 検索
func (api *ProxyLBAPI) Find() (*SearchProxyLBResponse, error) {
data, err := api.client.newRequest("GET", api.getResourceURL(), api.getSearchState())
if err != nil {
return nil, err
}
var res SearchProxyLBResponse
if err := json.Unmarshal(data, &res); err != nil {
return nil, err
}
return &res, nil
}
func (api *ProxyLBAPI) request(f func(*proxyLBResponse) error) (*sacloud.ProxyLB, error) {
res := &proxyLBResponse{}
err := f(res)
if err != nil {
return nil, err
}
return res.ProxyLB, nil
}
func (api *ProxyLBAPI) createRequest(value *sacloud.ProxyLB) *proxyLBResponse {
return &proxyLBResponse{ProxyLB: value}
}
// New 新規作成用パラメーター作成
func (api *ProxyLBAPI) New(name string) *sacloud.ProxyLB {
return sacloud.CreateNewProxyLB(name)
}
// Create 新規作成
func (api *ProxyLBAPI) Create(value *sacloud.ProxyLB) (*sacloud.ProxyLB, error) {
return api.request(func(res *proxyLBResponse) error {
return api.create(api.createRequest(value), res)
})
}
// Read 読み取り
func (api *ProxyLBAPI) Read(id int64) (*sacloud.ProxyLB, error) {
return api.request(func(res *proxyLBResponse) error {
return api.read(id, nil, res)
})
}
// Update 更新
func (api *ProxyLBAPI) Update(id int64, value *sacloud.ProxyLB) (*sacloud.ProxyLB, error) {
return api.request(func(res *proxyLBResponse) error {
return api.update(id, api.createRequest(value), res)
})
}
// UpdateSetting 設定更新
func (api *ProxyLBAPI) UpdateSetting(id int64, value *sacloud.ProxyLB) (*sacloud.ProxyLB, error) {
req := &sacloud.ProxyLB{
// Settings
Settings: value.Settings,
}
return api.request(func(res *proxyLBResponse) error {
return api.update(id, api.createRequest(req), res)
})
}
// Delete 削除
func (api *ProxyLBAPI) Delete(id int64) (*sacloud.ProxyLB, error) {
return api.request(func(res *proxyLBResponse) error {
return api.delete(id, nil, res)
})
}
// ChangePlan プラン変更
func (api *ProxyLBAPI) ChangePlan(id int64, newPlan sacloud.ProxyLBPlan) (*sacloud.ProxyLB, error) {
var (
method = "PUT"
uri = fmt.Sprintf("%s/%d/plan", api.getResourceURL(), id)
)
body := &sacloud.ProxyLB{}
body.SetPlan(newPlan)
realBody := map[string]interface{}{
"CommonServiceItem": map[string]interface{}{
"ServiceClass": body.ServiceClass,
},
}
return api.request(func(res *proxyLBResponse) error {
return api.baseAPI.request(method, uri, realBody, res)
})
}
type proxyLBCertificateResponse struct {
*sacloud.ResultFlagValue
ProxyLB *sacloud.ProxyLBCertificates `json:",omitempty"`
}
// GetCertificates 証明書取得
func (api *ProxyLBAPI) GetCertificates(id int64) (*sacloud.ProxyLBCertificates, error) {
var (
method = "GET"
uri = fmt.Sprintf("%s/%d/proxylb/sslcertificate", api.getResourceURL(), id)
res = &proxyLBCertificateResponse{}
)
err := api.baseAPI.request(method, uri, nil, res)
if err != nil {
return nil, err
}
if res.ProxyLB == nil {
return nil, nil
}
return res.ProxyLB, nil
}
// SetCertificates 証明書設定
func (api *ProxyLBAPI) SetCertificates(id int64, certs *sacloud.ProxyLBCertificates) (bool, error) {
var (
method = "PUT"
uri = fmt.Sprintf("%s/%d/proxylb/sslcertificate", api.getResourceURL(), id)
res = &proxyLBCertificateResponse{}
)
err := api.baseAPI.request(method, uri, map[string]interface{}{
"ProxyLB": certs,
}, res)
if err != nil {
return false, err
}
return true, nil
}
// DeleteCertificates 証明書削除
func (api *ProxyLBAPI) DeleteCertificates(id int64) (bool, error) {
var (
method = "DELETE"
uri = fmt.Sprintf("%s/%d/proxylb/sslcertificate", api.getResourceURL(), id)
)
return api.baseAPI.modify(method, uri, nil)
}
type proxyLBHealthResponse struct {
*sacloud.ResultFlagValue
ProxyLB *sacloud.ProxyLBStatus `json:",omitempty"`
}
// Health ヘルスチェックステータス取得
func (api *ProxyLBAPI) Health(id int64) (*sacloud.ProxyLBStatus, error) {
var (
method = "GET"
uri = fmt.Sprintf("%s/%d/health", api.getResourceURL(), id)
res = &proxyLBHealthResponse{}
)
err := api.baseAPI.request(method, uri, nil, res)
if err != nil {
return nil, err
}
if res.ProxyLB == nil {
return nil, nil
}
return res.ProxyLB, nil
}
// Monitor アクティビティーモニター取得
func (api *ProxyLBAPI) Monitor(id int64, body *sacloud.ResourceMonitorRequest) (*sacloud.MonitorValues, error) {
return api.baseAPI.applianceMonitorBy(id, "activity/proxylb", 0, body)
}
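
A minimal usage sketch of the ProxyLBAPI shown above (create, read, fluent search, delete). It assumes api.NewClient(token, secret, zone) from the rest of the libsacloud api package builds the *Client; that constructor is not part of this diff, and the credentials/zone are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/sacloud/libsacloud/api"
)

func main() {
	// Hypothetical credentials and zone; api.NewClient is assumed from the rest of the package.
	client := api.NewClient("YOUR-TOKEN", "YOUR-SECRET", "is1a")
	proxyLBAPI := api.NewProxyLBAPI(client)

	// Create a ProxyLB from the parameter helper above, then read it back.
	created, err := proxyLBAPI.Create(proxyLBAPI.New("example-proxylb"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created:", created.Name, created.ID)

	if _, err := proxyLBAPI.Read(created.ID); err != nil {
		log.Fatal(err)
	}

	// Fluent search helpers (generated proxylb_gen.go, just below).
	if _, err := proxyLBAPI.Reset().WithNameLike("example-proxylb").Limit(10).Find(); err != nil {
		log.Fatal(err)
	}

	if _, err := proxyLBAPI.Delete(created.ID); err != nil {
		log.Fatal(err)
	}
}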

238
vendor/github.com/sacloud/libsacloud/api/proxylb_gen.go generated vendored Normal file
View file

@ -0,0 +1,238 @@
package api
/************************************************
generated by IDE. for [ProxyLBAPI]
************************************************/
import (
"github.com/sacloud/libsacloud/sacloud"
)
/************************************************
To support fluent interface for Find()
************************************************/
// Reset 検索条件のリセット
func (api *ProxyLBAPI) Reset() *ProxyLBAPI {
api.reset()
return api
}
// Offset オフセット
func (api *ProxyLBAPI) Offset(offset int) *ProxyLBAPI {
api.offset(offset)
return api
}
// Limit リミット
func (api *ProxyLBAPI) Limit(limit int) *ProxyLBAPI {
api.limit(limit)
return api
}
// Include 取得する項目
func (api *ProxyLBAPI) Include(key string) *ProxyLBAPI {
api.include(key)
return api
}
// Exclude 除外する項目
func (api *ProxyLBAPI) Exclude(key string) *ProxyLBAPI {
api.exclude(key)
return api
}
// FilterBy 指定キーでのフィルター
func (api *ProxyLBAPI) FilterBy(key string, value interface{}) *ProxyLBAPI {
api.filterBy(key, value, false)
return api
}
// FilterMultiBy 任意項目でのフィルタ(完全一致 OR条件)
func (api *ProxyLBAPI) FilterMultiBy(key string, value interface{}) *ProxyLBAPI {
api.filterBy(key, value, true)
return api
}
// WithNameLike 名称条件
func (api *ProxyLBAPI) WithNameLike(name string) *ProxyLBAPI {
return api.FilterBy("Name", name)
}
// WithTag タグ条件
func (api *ProxyLBAPI) WithTag(tag string) *ProxyLBAPI {
return api.FilterBy("Tags.Name", tag)
}
// WithTags タグ(複数)条件
func (api *ProxyLBAPI) WithTags(tags []string) *ProxyLBAPI {
return api.FilterBy("Tags.Name", []interface{}{tags})
}
// func (api *ProxyLBAPI) WithSizeGib(size int) *ProxyLBAPI {
// api.FilterBy("SizeMB", size*1024)
// return api
// }
// func (api *ProxyLBAPI) WithSharedScope() *ProxyLBAPI {
// api.FilterBy("Scope", "shared")
// return api
// }
// func (api *ProxyLBAPI) WithUserScope() *ProxyLBAPI {
// api.FilterBy("Scope", "user")
// return api
// }
// SortBy 指定キーでのソート
func (api *ProxyLBAPI) SortBy(key string, reverse bool) *ProxyLBAPI {
api.sortBy(key, reverse)
return api
}
// SortByName 名称でのソート
func (api *ProxyLBAPI) SortByName(reverse bool) *ProxyLBAPI {
api.sortByName(reverse)
return api
}
// func (api *ProxyLBAPI) SortBySize(reverse bool) *ProxyLBAPI {
// api.sortBy("SizeMB", reverse)
// return api
// }
/************************************************
To support Setxxx interface for Find()
************************************************/
// SetEmpty 検索条件のリセット
func (api *ProxyLBAPI) SetEmpty() {
api.reset()
}
// SetOffset オフセット
func (api *ProxyLBAPI) SetOffset(offset int) {
api.offset(offset)
}
// SetLimit リミット
func (api *ProxyLBAPI) SetLimit(limit int) {
api.limit(limit)
}
// SetInclude 取得する項目
func (api *ProxyLBAPI) SetInclude(key string) {
api.include(key)
}
// SetExclude 除外する項目
func (api *ProxyLBAPI) SetExclude(key string) {
api.exclude(key)
}
// SetFilterBy 指定キーでのフィルター
func (api *ProxyLBAPI) SetFilterBy(key string, value interface{}) {
api.filterBy(key, value, false)
}
// SetFilterMultiBy 任意項目でのフィルタ(完全一致 OR条件)
func (api *ProxyLBAPI) SetFilterMultiBy(key string, value interface{}) {
api.filterBy(key, value, true)
}
// SetNameLike 名称条件
func (api *ProxyLBAPI) SetNameLike(name string) {
api.FilterBy("Name", name)
}
// SetTag タグ条件
func (api *ProxyLBAPI) SetTag(tag string) {
api.FilterBy("Tags.Name", tag)
}
// SetTags タグ(複数)条件
func (api *ProxyLBAPI) SetTags(tags []string) {
api.FilterBy("Tags.Name", []interface{}{tags})
}
// func (api *ProxyLBAPI) SetSizeGib(size int) {
// api.FilterBy("SizeMB", size*1024)
// }
// func (api *ProxyLBAPI) SetSharedScope() {
// api.FilterBy("Scope", "shared")
// }
// func (api *ProxyLBAPI) SetUserScope() {
// api.FilterBy("Scope", "user")
// }
// SetSortBy 指定キーでのソート
func (api *ProxyLBAPI) SetSortBy(key string, reverse bool) {
api.sortBy(key, reverse)
}
// SetSortByName 名称でのソート
func (api *ProxyLBAPI) SetSortByName(reverse bool) {
api.sortByName(reverse)
}
// func (api *ProxyLBAPI) SetSortBySize(reverse bool) {
// api.sortBy("SizeMB", reverse)
// }
/************************************************
To support CRUD(Create/Read/Update/Delete)
************************************************/
// func (api *ProxyLBAPI) New() *sacloud.ProxyLB {
// return &sacloud.ProxyLB{}
// }
// func (api *ProxyLBAPI) Create(value *sacloud.ProxyLB) (*sacloud.ProxyLB, error) {
// return api.request(func(res *sacloud.Response) error {
// return api.create(api.createRequest(value), res)
// })
// }
// func (api *ProxyLBAPI) Read(id string) (*sacloud.ProxyLB, error) {
// return api.request(func(res *sacloud.Response) error {
// return api.read(id, nil, res)
// })
// }
// func (api *ProxyLBAPI) Update(id string, value *sacloud.ProxyLB) (*sacloud.ProxyLB, error) {
// return api.request(func(res *sacloud.Response) error {
// return api.update(id, api.createRequest(value), res)
// })
// }
// func (api *ProxyLBAPI) Delete(id string) (*sacloud.ProxyLB, error) {
// return api.request(func(res *sacloud.Response) error {
// return api.delete(id, nil, res)
// })
// }
/************************************************
Inner functions
************************************************/
func (api *ProxyLBAPI) setStateValue(setFunc func(*sacloud.Request)) *ProxyLBAPI {
api.baseAPI.setStateValue(setFunc)
return api
}
//func (api *ProxyLBAPI) request(f func(*sacloud.Response) error) (*sacloud.ProxyLB, error) {
// res := &sacloud.Response{}
// err := f(res)
// if err != nil {
// return nil, err
// }
// return res.ProxyLB, nil
//}
//
//func (api *ProxyLBAPI) createRequest(value *sacloud.ProxyLB) *sacloud.Request {
// req := &sacloud.Request{}
// req.ProxyLB = value
// return req
//}

View file

@ -0,0 +1,32 @@
package api
import (
"go.uber.org/ratelimit"
"net/http"
"sync"
)
// RateLimitRoundTripper 秒間アクセス数を制限するためのhttp.RoundTripper実装
type RateLimitRoundTripper struct {
// Transport 親となるhttp.RoundTripper、nilの場合http.DefaultTransportが利用される
Transport http.RoundTripper
// RateLimitPerSec 秒あたりのリクエスト数
RateLimitPerSec int
once sync.Once
rateLimit ratelimit.Limiter
}
// RoundTrip http.RoundTripperの実装
func (r *RateLimitRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
r.once.Do(func() {
r.rateLimit = ratelimit.New(r.RateLimitPerSec)
})
if r.Transport == nil {
r.Transport = http.DefaultTransport
}
r.rateLimit.Take()
return r.Transport.RoundTrip(req)
}
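
A minimal sketch of wiring this rate limiter into a plain http.Client; only the exported fields shown above are used, and the target URL is a placeholder.

package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/sacloud/libsacloud/api"
)

func main() {
	client := &http.Client{
		Transport: &api.RateLimitRoundTripper{
			// Transport left nil falls back to http.DefaultTransport; capped at 5 requests/sec.
			RateLimitPerSec: 5,
		},
	}

	for i := 0; i < 10; i++ {
		resp, err := client.Get("https://example.com/") // placeholder endpoint
		if err != nil {
			log.Fatal(err)
		}
		resp.Body.Close()
		fmt.Println(i, resp.Status)
	}
}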

View file

@ -2,8 +2,9 @@ package api
import (
"fmt"
"github.com/sacloud/libsacloud/sacloud"
"time"
"github.com/sacloud/libsacloud/sacloud"
)
// ServerAPI サーバーAPI
@ -150,14 +151,18 @@ func (api *ServerAPI) SleepUntilDown(id int64, timeout time.Duration) error {
}
// ChangePlan サーバープラン変更(サーバーIDが変更となるため注意)
func (api *ServerAPI) ChangePlan(serverID int64, planID string) (*sacloud.Server, error) {
func (api *ServerAPI) ChangePlan(serverID int64, plan *sacloud.ProductServer) (*sacloud.Server, error) {
var (
method = "PUT"
uri = fmt.Sprintf("%s/%d/to/plan/%s", api.getResourceURL(), serverID, planID)
uri = fmt.Sprintf("%s/%d/plan", api.getResourceURL(), serverID)
body = &sacloud.ProductServer{}
)
body.CPU = plan.CPU
body.MemoryMB = plan.MemoryMB
body.Generation = plan.Generation
return api.request(func(res *sacloud.Response) error {
return api.baseAPI.request(method, uri, nil, res)
return api.baseAPI.request(method, uri, body, res)
})
}
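
The new ChangePlan signature takes a full *sacloud.ProductServer instead of a plan ID string. A minimal sketch of building that value with the property helpers added elsewhere in this diff (SetMemoryGB, PlanG2); how the *api.ServerAPI is obtained is omitted here.

package example

import (
	"github.com/sacloud/libsacloud/api"
	"github.com/sacloud/libsacloud/sacloud"
)

// upgradePlan switches serverID to a 2-core / 4GB generation-2 plan.
// Note: as the comment above says, a plan change allocates a new server ID.
func upgradePlan(serverAPI *api.ServerAPI, serverID int64) (*sacloud.Server, error) {
	plan := &sacloud.ProductServer{}
	plan.CPU = 2                     // promoted from propCPU
	plan.SetMemoryGB(4)              // propMemoryMB helper added later in this diff
	plan.Generation = sacloud.PlanG2 // plan-generation constant added later in this diff

	return serverAPI.ChangePlan(serverID, plan)
}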

View file

@ -204,6 +204,37 @@ func (api *SIMAPI) Logs(id int64, body interface{}) ([]sacloud.SIMLog, error) {
return res.Logs, nil
}
// GetNetworkOperator 通信キャリア 取得
func (api *SIMAPI) GetNetworkOperator(id int64) (*sacloud.SIMNetworkOperatorConfigs, error) {
var (
method = "GET"
uri = fmt.Sprintf("%s/%d/sim/network_operator_config", api.getResourceURL(), id)
)
res := &sacloud.SIMNetworkOperatorConfigs{}
err := api.baseAPI.request(method, uri, nil, res)
if err != nil {
return nil, err
}
return res, nil
}
// SetNetworkOperator 通信キャリア 設定
func (api *SIMAPI) SetNetworkOperator(id int64, opConfig ...*sacloud.SIMNetworkOperatorConfig) (bool, error) {
var (
method = "PUT"
uri = fmt.Sprintf("%s/%d/sim/network_operator_config", api.getResourceURL(), id)
)
err := api.baseAPI.request(method, uri, &sacloud.SIMNetworkOperatorConfigs{NetworkOperatorConfigs: opConfig}, nil)
if err != nil {
return false, err
}
return true, nil
}
// Monitor アクティビティーモニター(Up/Down link BPS)取得
func (api *SIMAPI) Monitor(id int64, body *sacloud.ResourceMonitorRequest) (*sacloud.MonitorValues, error) {
var (

View file

@ -1,9 +1,9 @@
package api
import (
"encoding/json"
// "strings"
"encoding/json" // "strings"
"fmt"
"github.com/sacloud/libsacloud/sacloud"
)
@ -118,6 +118,25 @@ func (api *SimpleMonitorAPI) Delete(id int64) (*sacloud.SimpleMonitor, error) {
})
}
// Health ヘルスチェック
//
// まだチェックが行われていない場合nilを返す
func (api *SimpleMonitorAPI) Health(id int64) (*sacloud.SimpleMonitorHealthCheckStatus, error) {
var (
method = "GET"
uri = fmt.Sprintf("%s/%d/health", api.getResourceURL(), id)
)
res := struct {
SimpleMonitor *sacloud.SimpleMonitorHealthCheckStatus `json:",omitempty"`
}{}
err := api.baseAPI.request(method, uri, nil, &res)
if err != nil {
return nil, err
}
return res.SimpleMonitor, nil
}
// MonitorResponseTimeSec アクティビティーモニター(レスポンスタイム)取得
func (api *SimpleMonitorAPI) MonitorResponseTimeSec(id int64, body *sacloud.ResourceMonitorRequest) (*sacloud.MonitorValues, error) {
var (

View file

@ -2,6 +2,7 @@ package api
import (
"fmt"
"github.com/sacloud/libsacloud/sacloud"
)

View file

@ -2,6 +2,7 @@ package api
import (
"fmt"
"github.com/sacloud/libsacloud/sacloud"
)

View file

@ -3,8 +3,9 @@ package api
import (
"encoding/json"
"fmt"
"github.com/sacloud/libsacloud/sacloud"
"time"
"github.com/sacloud/libsacloud/sacloud"
)
//HACK: さくらのAPI側仕様: Applianceの内容によってJSONフォーマットが異なるため

View file

@ -3,8 +3,9 @@ package api
import (
"encoding/json"
"fmt"
"github.com/sacloud/libsacloud/sacloud"
"strings"
"github.com/sacloud/libsacloud/sacloud"
)
// WebAccelAPI ウェブアクセラレータAPI

View file

@ -3,9 +3,10 @@ package api
import (
"encoding/json"
"fmt"
"github.com/sacloud/libsacloud/sacloud"
"net/url"
"strings"
"github.com/sacloud/libsacloud/sacloud"
)
// Reset 検索条件のリセット

View file

@ -2,4 +2,4 @@
package libsacloud
// Version バージョン
const Version = "1.0.0-rc5"
const Version = "1.21.1"

View file

@ -138,6 +138,36 @@ var (
// EDiskConnection ディスク接続方法
type EDiskConnection string
// EUpstreamNetworkType 上流ネットワーク種別
type EUpstreamNetworkType string
// String EUpstreamNetworkTypeの文字列表現
func (t EUpstreamNetworkType) String() string {
return string(t)
}
var (
// EUpstreamNetworkUnknown 不明
EUpstreamNetworkUnknown = EUpstreamNetworkType("unknown")
// EUpstreamNetworkShared 共有セグメント
EUpstreamNetworkShared = EUpstreamNetworkType("shared")
// EUpstreamNetworkSwitch スイッチ(非スイッチ+ルータ)
EUpstreamNetworkSwitch = EUpstreamNetworkType("switch")
// EUpstreamNetworkRouter ルータ(スイッチ+ルータのスイッチ)
EUpstreamNetworkRouter = EUpstreamNetworkType("router")
// EUpstreamNetworkNone 接続なし
EUpstreamNetworkNone = EUpstreamNetworkType("none")
// UpstreamNetworks 文字列とEUpstreamNetworkTypeのマッピング
UpstreamNetworks = map[string]EUpstreamNetworkType{
"unknown": EUpstreamNetworkUnknown,
"shared": EUpstreamNetworkShared,
"switch": EUpstreamNetworkSwitch,
"router": EUpstreamNetworkRouter,
"none": EUpstreamNetworkNone,
}
)
// SakuraCloudResources さくらのクラウド上のリソース種別一覧
type SakuraCloudResources struct {
Server *Server `json:",omitempty"` // サーバー
@ -213,7 +243,7 @@ type Request struct {
Filter map[string]interface{} `json:",omitempty"` // フィルタ
Exclude []string `json:",omitempty"` // 除外する項目
Include []string `json:",omitempty"` // 取得する項目
DistantFrom []int64 `json:",omitempty"` // ストレージ隔離対象ディスク
}
// AddFilter フィルタの追加
@ -324,3 +354,15 @@ var (
// DatetimeLayout さくらのクラウドAPIで利用される日付型のレイアウト(RFC3339)
var DatetimeLayout = "2006-01-02T15:04:05-07:00"
// PlanGenerations サーバプラン世代
type PlanGenerations int
var (
// PlanDefault デフォルト
PlanDefault = PlanGenerations(0)
// PlanG1 第1世代(Generation:100)
PlanG1 = PlanGenerations(100)
// PlanG2 第2世代(Generation:200)
PlanG2 = PlanGenerations(200)
)

14
vendor/github.com/sacloud/libsacloud/sacloud/coupon.go generated vendored Normal file
View file

@ -0,0 +1,14 @@
package sacloud
import "time"
// Coupon クーポン情報
type Coupon struct {
CouponID string `json:",omitempty"` // クーポンID
MemberID string `json:",omitempty"` // メンバーID
ContractID int64 `json:",omitempty"` // 契約ID
ServiceClassID int64 `json:",omitempty"` // サービスクラスID
Discount int64 `json:",omitempty"` // クーポン残高
AppliedAt time.Time `json:",omitempty"` // 適用開始日
UntilAt time.Time `json:",omitempty"` // 有効期限
}

View file

@ -2,9 +2,15 @@ package sacloud
import (
"encoding/json"
"fmt"
"strings"
)
// AllowDatabaseBackupWeekdays データベースバックアップ実行曜日リスト
func AllowDatabaseBackupWeekdays() []string {
return []string{"mon", "tue", "wed", "thu", "fri", "sat", "sun"}
}
// Database データベース(appliance)
type Database struct {
*Appliance // アプライアンス共通属性
@ -64,8 +70,6 @@ type DatabaseCommonRemark struct {
DatabaseRevision string `json:",omitempty"` // リビジョン
DatabaseTitle string `json:",omitempty"` // タイトル
DatabaseVersion string `json:",omitempty"` // バージョン
ReplicaPassword string `json:",omitempty"` // レプリケーションパスワード
ReplicaUser string `json:",omitempty"` // レプリケーションユーザー
}
// DatabaseSettings データベース設定リスト
@ -75,8 +79,9 @@ type DatabaseSettings struct {
// DatabaseSetting データベース設定
type DatabaseSetting struct {
Backup *DatabaseBackupSetting `json:",omitempty"` // バックアップ設定
Common *DatabaseCommonSetting `json:",oitempty"` // 共通設定
Backup *DatabaseBackupSetting `json:",omitempty"` // バックアップ設定
Common *DatabaseCommonSetting `json:",omitempty"` // 共通設定
Replication *DatabaseReplicationSetting `json:",omitempty"` // レプリケーション設定
}
// DatabaseServer データベースサーバー情報
@ -122,17 +127,20 @@ func AllowDatabasePlans() []int {
// DatabaseBackupSetting バックアップ設定
type DatabaseBackupSetting struct {
Rotate int `json:",omitempty"` // ローテーション世代数
Time string `json:",omitempty"` // 開始時刻
Rotate int `json:",omitempty"` // ローテーション世代数
Time string `json:",omitempty"` // 開始時刻
DayOfWeek []string `json:",omitempty"` // 取得曜日
}
// DatabaseCommonSetting 共通設定
type DatabaseCommonSetting struct {
DefaultUser string `json:",omitempty"` // ユーザー名
UserPassword string `json:",omitempty"` // ユーザーパスワード
WebUI interface{} `json:",omitempty"` // WebUIのIPアドレス or FQDN
ServicePort string // ポート番号
SourceNetwork SourceNetwork // 接続許可ネットワーク
DefaultUser string `json:",omitempty"` // ユーザー名
UserPassword string `json:",omitempty"` // ユーザーパスワード
WebUI interface{} `json:",omitempty"` // WebUIのIPアドレス or FQDN
ReplicaPassword string `json:",omitempty"` // レプリケーションパスワード
ReplicaUser string `json:",omitempty"` // レプリケーションユーザー
ServicePort json.Number `json:",omitempty"` // ポート番号
SourceNetwork SourceNetwork // 接続許可ネットワーク
}
// SourceNetwork 接続許可ネットワーク
@ -168,32 +176,84 @@ func (s *SourceNetwork) MarshalJSON() ([]byte, error) {
return json.Marshal(list)
}
// DatabaseReplicationSetting レプリケーション設定
type DatabaseReplicationSetting struct {
// Model レプリケーションモデル
Model DatabaseReplicationModels `json:",omitempty"`
// Appliance マスター側アプライアンス
Appliance *struct {
ID string
} `json:",omitempty"`
// IPAddress IPアドレス
IPAddress string `json:",omitempty"`
// Port ポート
Port int `json:",omitempty"`
// User ユーザー
User string `json:",omitempty"`
// Password パスワード
Password string `json:",omitempty"`
}
// DatabaseReplicationModels データベースのレプリケーションモデル
type DatabaseReplicationModels string
const (
// DatabaseReplicationModelMasterSlave レプリケーションモデル: Master-Slave(マスター側)
DatabaseReplicationModelMasterSlave = "Master-Slave"
// DatabaseReplicationModelAsyncReplica レプリケーションモデル: Async-Replica(スレーブ側)
DatabaseReplicationModelAsyncReplica = "Async-Replica"
)
// CreateDatabaseValue データベース作成用パラメータ
type CreateDatabaseValue struct {
Plan DatabasePlan // プラン
AdminPassword string // 管理者パスワード
DefaultUser string // ユーザー名
UserPassword string // パスワード
SourceNetwork []string // 接続許可ネットワーク
ServicePort string // ポート
// BackupRotate int // バックアップ世代数
BackupTime string // バックアップ開始時間
SwitchID string // 接続先スイッチ
IPAddress1 string // IPアドレス1
MaskLen int // ネットワークマスク長
DefaultRoute string // デフォルトルート
Name string // 名称
Description string // 説明
Tags []string // タグ
Icon *Resource // アイコン
WebUI bool // WebUI有効
DatabaseName string // データベース名
DatabaseRevision string // リビジョン
DatabaseTitle string // データベースタイトル
DatabaseVersion string // データベースバージョン
ReplicaUser string // ReplicaUser レプリケーションユーザー
SourceAppliance *Resource // クローン元DB
//ReplicaPassword string // in current API version , setted admin password
Plan DatabasePlan // プラン
AdminPassword string // 管理者パスワード
DefaultUser string // ユーザー名
UserPassword string // パスワード
SourceNetwork []string // 接続許可ネットワーク
ServicePort int // ポート
EnableBackup bool // バックアップ有効化
BackupRotate int // バックアップ世代数
BackupTime string // バックアップ開始時間
BackupDayOfWeek []string // バックアップ取得曜日
SwitchID string // 接続先スイッチ
IPAddress1 string // IPアドレス1
MaskLen int // ネットワークマスク長
DefaultRoute string // デフォルトルート
Name string // 名称
Description string // 説明
Tags []string // タグ
Icon *Resource // アイコン
WebUI bool // WebUI有効
DatabaseName string // データベース名
DatabaseRevision string // リビジョン
DatabaseTitle string // データベースタイトル
DatabaseVersion string // データベースバージョン
// ReplicaUser string // レプリケーションユーザー 現在はreplica固定
ReplicaPassword string // レプリケーションパスワード
SourceAppliance *Resource // クローン元DB
}
// SlaveDatabaseValue スレーブデータベース作成用パラメータ
type SlaveDatabaseValue struct {
Plan DatabasePlan // プラン
DefaultUser string // ユーザー名
UserPassword string // パスワード
SwitchID string // 接続先スイッチ
IPAddress1 string // IPアドレス1
MaskLen int // ネットワークマスク長
DefaultRoute string // デフォルトルート
Name string // 名称
Description string // 説明
Tags []string // タグ
Icon *Resource // アイコン
DatabaseName string // データベース名
DatabaseVersion string // データベースバージョン
// ReplicaUser string // レプリケーションユーザー 現在はreplica固定
ReplicaPassword string // レプリケーションパスワード
MasterApplianceID int64 // クローン元DB
MasterIPAddress string // マスターIPアドレス
MasterPort int // マスターポート
}
// NewCreatePostgreSQLDatabaseValue PostgreSQL作成用パラメーター
@ -267,10 +327,6 @@ func CreateNewDatabase(values *CreateDatabaseValue) *Database {
DatabaseTitle: values.DatabaseTitle,
// DatabaseVersion
DatabaseVersion: values.DatabaseVersion,
// ReplicaUser
// ReplicaUser: values.ReplicaUser,
// ReplicaPassword
// ReplicaPassword: values.AdminPassword,
},
},
// Plan
@ -288,6 +344,8 @@ func CreateNewDatabase(values *CreateDatabaseValue) *Database {
Rotate: 8,
// Time
Time: values.BackupTime,
// DayOfWeek
DayOfWeek: values.BackupDayOfWeek,
},
// Common
Common: &DatabaseCommonSetting{
@ -297,13 +355,19 @@ func CreateNewDatabase(values *CreateDatabaseValue) *Database {
UserPassword: values.UserPassword,
// SourceNetwork
SourceNetwork: SourceNetwork(values.SourceNetwork),
// ServicePort
ServicePort: values.ServicePort,
},
},
},
}
if values.ServicePort > 0 {
db.Settings.DBConf.Common.ServicePort = json.Number(fmt.Sprintf("%d", values.ServicePort))
}
if !values.EnableBackup {
db.Settings.DBConf.Backup = nil
}
db.Remark.Switch = &ApplianceRemarkSwitch{
// ID
ID: values.SwitchID,
@ -323,11 +387,19 @@ func CreateNewDatabase(values *CreateDatabaseValue) *Database {
db.Settings.DBConf.Common.WebUI = values.WebUI
}
if values.ReplicaPassword != "" {
db.Settings.DBConf.Common.ReplicaUser = "replica"
db.Settings.DBConf.Common.ReplicaPassword = values.ReplicaPassword
db.Settings.DBConf.Replication = &DatabaseReplicationSetting{
Model: DatabaseReplicationModelMasterSlave,
}
}
return db
}
// CloneNewDatabase データベース作成
func CloneNewDatabase(values *CreateDatabaseValue) *Database {
// NewSlaveDatabaseValue スレーブ向けパラメータ作成
func NewSlaveDatabaseValue(values *SlaveDatabaseValue) *Database {
db := &Database{
// Appliance
Appliance: &Appliance{
@ -363,32 +435,34 @@ func CloneNewDatabase(values *CreateDatabaseValue) *Database {
DBConf: &DatabaseCommonRemarks{
// Common
Common: &DatabaseCommonRemark{
DatabaseName: values.DatabaseName,
// DatabaseName
DatabaseName: values.DatabaseName,
// DatabaseVersion
DatabaseVersion: values.DatabaseVersion,
},
},
// Plan
propPlanID: propPlanID{Plan: &Resource{ID: int64(values.Plan)}},
SourceAppliance: values.SourceAppliance,
propPlanID: propPlanID{Plan: &Resource{ID: int64(values.Plan)}},
},
// Settings
Settings: &DatabaseSettings{
// DBConf
DBConf: &DatabaseSetting{
// Backup
Backup: &DatabaseBackupSetting{
// Rotate
// Rotate: values.BackupRotate,
Rotate: 8,
// Time
Time: values.BackupTime,
},
// Common
Common: &DatabaseCommonSetting{
// SourceNetwork
SourceNetwork: SourceNetwork(values.SourceNetwork),
// ServicePort
ServicePort: values.ServicePort,
// DefaultUser
DefaultUser: values.DefaultUser,
// UserPassword
UserPassword: values.UserPassword,
},
// Replication
Replication: &DatabaseReplicationSetting{
Model: DatabaseReplicationModelAsyncReplica,
Appliance: &struct{ ID string }{ID: fmt.Sprintf("%d", values.MasterApplianceID)},
IPAddress: values.MasterIPAddress,
Port: values.MasterPort,
User: "replica",
Password: values.ReplicaPassword,
},
},
},
@ -409,10 +483,6 @@ func CloneNewDatabase(values *CreateDatabaseValue) *Database {
map[string]interface{}{"IPAddress": values.IPAddress1},
}
if values.WebUI {
db.Settings.DBConf.Common.WebUI = values.WebUI
}
return db
}
@ -433,3 +503,71 @@ func (s *Database) DeleteSourceNetwork(nw string) {
}
s.Settings.DBConf.Common.SourceNetwork = SourceNetwork(res)
}
// IsReplicationMaster レプリケーションが有効かつマスターとして構成されているか
func (s *Database) IsReplicationMaster() bool {
return s.IsReplicationEnabled() && s.Settings.DBConf.Replication.Model == DatabaseReplicationModelMasterSlave
}
// IsReplicationEnabled レプリケーションが有効な場合はTrueを返す
func (s *Database) IsReplicationEnabled() bool {
return s.Settings.DBConf.Replication != nil
}
// DatabaseName MariaDB or PostgreSQLの何れかを返す
func (s *Database) DatabaseName() string {
return s.Remark.DBConf.Common.DatabaseName
}
// DatabaseRevision データベースのリビジョンを返す
//
// 例: MariaDBの場合 => 10.2.15 / PostgreSQLの場合 => 10.3
func (s *Database) DatabaseRevision() string {
return s.Remark.DBConf.Common.DatabaseRevision
}
// DatabaseVersion データベースのバージョンを返す
//
// 例: MariaDBの場合 => 10.2 / PostgreSQLの場合 => 10
func (s *Database) DatabaseVersion() string {
return s.Remark.DBConf.Common.DatabaseVersion
}
// WebUIAddress WebUIが有効な場合、IPアドレス or FQDNを返す、無効な場合は空文字を返す
func (s *Database) WebUIAddress() string {
webUI := s.Settings.DBConf.Common.WebUI
if webUI != nil {
if v, ok := webUI.(string); ok {
return v
}
}
return ""
}
// IPAddress IPアドレスを取得
func (s *Database) IPAddress() string {
if len(s.Remark.Servers) < 1 {
return ""
}
v, ok := s.Remark.Servers[0].(map[string]string)
if !ok {
return ""
}
return v["IPAddress"]
}
// NetworkMaskLen ネットワークマスク長を取得
func (s *Database) NetworkMaskLen() int {
if s.Remark.Network == nil {
return -1
}
return s.Remark.Network.NetworkMaskLen
}
// DefaultRoute デフォルトゲートウェイアドレスを取得
func (s *Database) DefaultRoute() string {
if s.Remark.Network == nil {
return ""
}
return s.Remark.Network.DefaultRoute
}
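
A minimal sketch of reading these helpers off a populated *sacloud.Database (for example one returned by the database API, which is outside this diff); it assumes Remark and Settings are filled in as they are for API responses.

package example

import (
	"fmt"

	"github.com/sacloud/libsacloud/sacloud"
)

// describeDatabase prints the engine, addressing and replication role of a database appliance.
func describeDatabase(db *sacloud.Database) {
	fmt.Printf("engine: %s %s (rev %s)\n", db.DatabaseName(), db.DatabaseVersion(), db.DatabaseRevision())
	fmt.Printf("address: %s/%d gw %s\n", db.IPAddress(), db.NetworkMaskLen(), db.DefaultRoute())

	if ui := db.WebUIAddress(); ui != "" {
		fmt.Println("web UI:", ui)
	}

	switch {
	case !db.IsReplicationEnabled():
		fmt.Println("replication: disabled")
	case db.IsReplicationMaster():
		fmt.Println("replication: master (Master-Slave)")
	default:
		fmt.Println("replication: replica (Async-Replica)")
	}
}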

View file

@ -4,22 +4,23 @@ import "fmt"
// Disk ディスク
type Disk struct {
*Resource // ID
propAvailability // 有功状態
propName // 名称
propDescription // 説明
propSizeMB // サイズ(MB単位)
propMigratedMB // コピー済みデータサイズ(MB単位)
propCopySource // コピー元情報
propJobStatus // マイグレーションジョブステータス
propBundleInfo // バンドル情報
propServer // サーバー
propIcon // アイコン
propTags // タグ
propCreatedAt // 作成日時
propPlanID // プランID
propDiskConnection // ディスク接続情報
propDistantFrom // ストレージ隔離対象ディスク
*Resource // ID
propAvailability // 有功状態
propName // 名称
propDescription // 説明
propSizeMB // サイズ(MB単位)
propMigratedMB // コピー済みデータサイズ(MB単位)
propCopySource // コピー元情報
propJobStatus // マイグレーションジョブステータス
propBundleInfo // バンドル情報
propServer // サーバー
propIcon // アイコン
propTags // タグ
propCreatedAt // 作成日時
propPlanID // プランID
propDiskConnection // ディスク接続情報
propDistantFrom // ストレージ隔離対象ディスク
Generation PlanGenerations `json:",omitempty"` // プラン世代
ReinstallCount int `json:",omitempty"` // 再インストール回数

View file

@ -50,7 +50,9 @@ func CreateNewDNS(zoneName string) *DNS {
Class: "dns",
},
Settings: DNSSettings{
DNS: DNSRecordSets{},
DNS: DNSRecordSets{
ResourceRecordSets: []DNSRecordSet{},
},
},
}
}
@ -135,7 +137,9 @@ func (d *DNS) AddRecord(record *DNSRecordSet) {
// ClearRecords レコード クリア
func (d *DNS) ClearRecords() {
d.Settings.DNS = DNSRecordSets{}
d.Settings.DNS = DNSRecordSets{
ResourceRecordSets: []DNSRecordSet{},
}
}
// DNSRecordSets DNSレコード設定リスト

View file

@ -41,3 +41,21 @@ func (i *Interface) GetHostName() string {
func (i *Interface) GetPacketFilter() *PacketFilter {
return i.PacketFilter
}
// UpstreamType 上流ネットワーク種別
func (i *Interface) UpstreamType() EUpstreamNetworkType {
sw := i.Switch
if sw == nil {
return EUpstreamNetworkNone
}
if sw.Subnet == nil {
return EUpstreamNetworkSwitch
}
if sw.Scope == ESCopeShared {
return EUpstreamNetworkShared
}
return EUpstreamNetworkRouter
}

View file

@ -41,7 +41,7 @@ func (i *Internet) SetNetworkMaskLen(v int) {
// AllowInternetBandWidth 設定可能な帯域幅の値リスト
func AllowInternetBandWidth() []int {
return []int{100, 250, 500, 1000, 1500, 2000, 2500, 3000}
return []int{100, 250, 500, 1000, 1500, 2000, 2500, 3000, 5000}
}
// AllowInternetNetworkMaskLen 設定可能なネットワークマスク長の値リスト

View file

@ -1,5 +1,7 @@
package sacloud
import "strconv"
// LoadBalancer ロードバランサー
type LoadBalancer struct {
*Appliance // アプライアンス共通属性
@ -8,6 +10,43 @@ type LoadBalancer struct {
Settings *LoadBalancerSettings `json:",omitempty"` // ロードバランサー設定
}
// IsHA 冗長化されている場合にtrueを返す
func (l *LoadBalancer) IsHA() bool {
isHA := false
if len(l.Remark.Servers) > 1 {
if v, ok := l.Remark.Servers[1].(map[string]string); ok {
if _, ok := v["IPAddress"]; ok {
isHA = true
}
}
}
return isHA
}
// IPAddress1 ロードバランサ本体のIPアドレス(1番目)を返す
func (l *LoadBalancer) IPAddress1() string {
if len(l.Remark.Servers) > 0 {
if v, ok := l.Remark.Servers[0].(map[string]string); ok {
if v, ok := v["IPAddress"]; ok {
return v
}
}
}
return ""
}
// IPAddress2 ロードバランサ本体のIPアドレス(2番目)を返す
func (l *LoadBalancer) IPAddress2() string {
if len(l.Remark.Servers) > 1 {
if v, ok := l.Remark.Servers[1].(map[string]string); ok {
if v, ok := v["IPAddress"]; ok {
return v
}
}
}
return ""
}
// LoadBalancerRemark リマーク
type LoadBalancerRemark struct {
*ApplianceRemarkBase
@ -17,7 +56,7 @@ type LoadBalancerRemark struct {
// LoadBalancerSettings ロードバランサー設定リスト
type LoadBalancerSettings struct {
LoadBalancer []*LoadBalancerSetting `json:",omitempty"` // ロードバランサー設定リスト
LoadBalancer []*LoadBalancerSetting // ロードバランサー設定リスト
}
// LoadBalancerSetting ロードバランサー仮想IP設定
@ -26,6 +65,7 @@ type LoadBalancerSetting struct {
Port string `json:",omitempty"` // ポート番号
DelayLoop string `json:",omitempty"` // 監視間隔
SorryServer string `json:",omitempty"` // ソーリーサーバー
Description string `json:",omitempty"` // 説明
Servers []*LoadBalancerServer `json:",omitempty"` // 仮想IP配下の実サーバー
}
@ -179,3 +219,73 @@ func (s *LoadBalancerSetting) DeleteServer(ip string, port string) {
s.Servers = res
}
// LoadBalancerStatusResult ロードバランサーのステータスAPI戻り値
type LoadBalancerStatusResult []*LoadBalancerStatus
// Get VIPに対応するステータスを取得
func (l *LoadBalancerStatusResult) Get(vip string) *LoadBalancerStatus {
for _, v := range *l {
if v.VirtualIPAddress == vip {
return v
}
}
return nil
}
// LoadBalancerStatus ロードバランサーのステータス
type LoadBalancerStatus struct {
VirtualIPAddress string
Port string
Servers []*LoadBalancerServerStatus `json:",omitempty"`
CPS string
}
// Get IPアドレスに対応する実サーバのステータスを取得
func (l *LoadBalancerStatus) Get(ip string) *LoadBalancerServerStatus {
for _, v := range l.Servers {
if v.IPAddress == ip {
return v
}
}
return nil
}
// NumCPS CPSを数値にして返す
func (l *LoadBalancerStatus) NumCPS() int {
v, _ := strconv.Atoi(l.CPS) // nolint - ignore error
return v
}
// NumPort Portを数値にして返す
func (l *LoadBalancerStatus) NumPort() int {
v, _ := strconv.Atoi(l.Port) // nolint - ignore error
return v
}
// LoadBalancerServerStatus ロードバランサーのVIP配下の実サーバのステータス
type LoadBalancerServerStatus struct {
ActiveConn string
IPAddress string
Status string
Port string
CPS string
}
// NumActiveConn ActiveConnを数値にして返す
func (l *LoadBalancerServerStatus) NumActiveConn() int {
v, _ := strconv.Atoi(l.ActiveConn) // nolint - ignore error
return v
}
// NumCPS CPSを数値にして返す
func (l *LoadBalancerServerStatus) NumCPS() int {
v, _ := strconv.Atoi(l.CPS) // nolint - ignore error
return v
}
// NumPort Portを数値にして返す
func (l *LoadBalancerServerStatus) NumPort() int {
v, _ := strconv.Atoi(l.Port) // nolint - ignore error
return v
}
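
A minimal sketch of walking a LoadBalancerStatusResult (typically obtained from the load balancer status API, which is outside this diff) with the lookup and numeric helpers above.

package example

import (
	"fmt"

	"github.com/sacloud/libsacloud/sacloud"
)

// reportVIP prints the status of one virtual IP and of each real server behind it.
func reportVIP(result sacloud.LoadBalancerStatusResult, vip string) {
	status := result.Get(vip)
	if status == nil {
		fmt.Println("no status for VIP", vip)
		return
	}
	fmt.Printf("%s:%d cps=%d\n", status.VirtualIPAddress, status.NumPort(), status.NumCPS())
	for _, server := range status.Servers {
		fmt.Printf("  %-15s status=%s active=%d\n", server.IPAddress, server.Status, server.NumActiveConn())
	}
}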

29
vendor/github.com/sacloud/libsacloud/sacloud/lock.go generated vendored Normal file
View file

@ -0,0 +1,29 @@
package sacloud
import (
"fmt"
"github.com/sacloud/libsacloud/utils/mutexkv"
)
var resourceMu = mutexkv.NewMutexKV()
// LockByKey 任意のキーでのMutexロック
func LockByKey(key string) {
resourceMu.Lock(key)
}
// UnlockByKey 任意のキーでのMutexアンロック
func UnlockByKey(key string) {
resourceMu.Unlock(key)
}
// LockByResourceID リソース単位でのMutexロック
func LockByResourceID(resourceID int64) {
resourceMu.Lock(fmt.Sprintf("%d", resourceID))
}
// UnlockByResourceID リソース単位でのMutexアンロック
func UnlockByResourceID(resourceID int64) {
resourceMu.Unlock(fmt.Sprintf("%d", resourceID))
}
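
A minimal sketch of guarding an update with the resource-level mutex helpers above, for example when several goroutines may modify the same appliance concurrently.

package example

import "github.com/sacloud/libsacloud/sacloud"

// withResourceLock serializes fn against other callers that lock the same resource ID.
func withResourceLock(resourceID int64, fn func() error) error {
	sacloud.LockByResourceID(resourceID)
	defer sacloud.UnlockByResourceID(resourceID)
	return fn()
}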

View file

@ -3,6 +3,7 @@ package sacloud
import (
"encoding/json"
"fmt"
"strconv"
"strings"
)
@ -28,9 +29,71 @@ type MobileGatewaySettings struct {
// MobileGatewaySetting モバイルゲートウェイ設定
type MobileGatewaySetting struct {
InternetConnection *MGWInternetConnection `json:",omitempty"` // インターネット接続
Interfaces []*MGWInterface `json:",omitempty"` // インターフェース
StaticRoutes []*MGWStaticRoute `json:",omitempty"` // スタティックルート
InternetConnection *MGWInternetConnection `json:",omitempty"` // インターネット接続
InterDeviceCommunication *MGWInterDeviceCommunication `json:",omitempty"` // デバイス間通信
Interfaces []*MGWInterface `json:",omitempty"` // インターフェース
StaticRoutes []*MGWStaticRoute `json:",omitempty"` // スタティックルート
}
// HasStaticRoutes スタティックルートを保持しているか
func (m *MobileGatewaySetting) HasStaticRoutes() bool {
return m.StaticRoutes != nil && len(m.StaticRoutes) > 0
}
// AddStaticRoute スタティックルート設定 追加
func (m *MobileGatewaySetting) AddStaticRoute(prefix string, nextHop string) (int, *MGWStaticRoute) {
if m.StaticRoutes == nil {
m.StaticRoutes = []*MGWStaticRoute{}
}
s := &MGWStaticRoute{
Prefix: prefix,
NextHop: nextHop,
}
m.StaticRoutes = append(m.StaticRoutes, s)
return len(m.StaticRoutes) - 1, s
}
// RemoveStaticRoute スタティックルート設定 削除
func (m *MobileGatewaySetting) RemoveStaticRoute(prefix string, nextHop string) {
if m.StaticRoutes == nil {
return
}
dest := []*MGWStaticRoute{}
for _, s := range m.StaticRoutes {
if s.Prefix != prefix || s.NextHop != nextHop {
dest = append(dest, s)
}
}
m.StaticRoutes = dest
}
// RemoveStaticRouteAt スタティックルート設定 削除
func (m *MobileGatewaySetting) RemoveStaticRouteAt(index int) {
if m.StaticRoutes == nil {
return
}
if index < len(m.StaticRoutes) {
s := m.StaticRoutes[index]
m.RemoveStaticRoute(s.Prefix, s.NextHop)
}
}
// FindStaticRoute スタティックルート設定 検索
func (m *MobileGatewaySetting) FindStaticRoute(prefix string, nextHop string) (int, *MGWStaticRoute) {
for i, s := range m.StaticRoutes {
if s.Prefix == prefix && s.NextHop == nextHop {
return i, s
}
}
return -1, nil
}
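
A minimal sketch of using the static-route helpers above through a *sacloud.MobileGateway; the HasSetting and Settings accessors it relies on are defined just below in this file, and the prefix/next-hop values are illustrative.

package example

import "github.com/sacloud/libsacloud/sacloud"

// ensureStaticRoute adds prefix via nextHop unless an identical route already exists.
// It reports whether the setting was modified.
func ensureStaticRoute(mgw *sacloud.MobileGateway, prefix, nextHop string) bool {
	if !mgw.HasSetting() {
		return false
	}
	setting := mgw.Settings.MobileGateway
	if i, _ := setting.FindStaticRoute(prefix, nextHop); i >= 0 {
		return false // already present
	}
	setting.AddStaticRoute(prefix, nextHop)
	return true
}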
// MGWInterDeviceCommunication デバイス間通信
type MGWInterDeviceCommunication struct {
Enabled string `json:",omitempty"`
}
// MGWInternetConnection インターネット接続
@ -121,6 +184,68 @@ func (m *MobileGateway) ClearPrivateInterface() {
m.Settings.MobileGateway.Interfaces = []*MGWInterface{nil}
}
// HasSetting モバイルゲートウェイ設定を保持しているか
func (m *MobileGateway) HasSetting() bool {
return m.Settings != nil && m.Settings.MobileGateway != nil
}
// HasStaticRoutes スタティックルートを保持しているか
func (m *MobileGateway) HasStaticRoutes() bool {
return m.HasSetting() && m.Settings.MobileGateway.HasStaticRoutes()
}
// InternetConnection インターネット接続が有効な場合にTrueを返す
func (m *MobileGateway) InternetConnection() bool {
return m.HasSetting() &&
m.Settings.MobileGateway.InternetConnection != nil &&
m.Settings.MobileGateway.InternetConnection.Enabled == "True"
}
// InterDeviceCommunication デバイス間通信が有効な場合にTrueを返す
func (m *MobileGateway) InterDeviceCommunication() bool {
return m.HasSetting() &&
m.Settings.MobileGateway.InterDeviceCommunication != nil &&
m.Settings.MobileGateway.InterDeviceCommunication.Enabled == "True"
}
// IPAddress 0番目のNICのIPアドレスを取得
func (m *MobileGateway) IPAddress() string {
return m.IPAddressAt(0)
}
// IPAddressAt IPアドレスを取得
func (m *MobileGateway) IPAddressAt(index int) string {
if len(m.Interfaces) <= index {
return ""
}
if index == 0 {
return m.Interfaces[0].IPAddress
}
ipaddresses := m.Settings.MobileGateway.Interfaces[index].IPAddress
if len(ipaddresses) < 1 {
return ""
}
return ipaddresses[0]
}
// NetworkMaskLen 0番目のNICのネットワークマスク長を取得
func (m *MobileGateway) NetworkMaskLen() int {
return m.NetworkMaskLenAt(0)
}
// NetworkMaskLenAt ネットワークマスク長を取得
func (m *MobileGateway) NetworkMaskLenAt(index int) int {
if len(m.Interfaces) <= index {
return -1
}
if index == 0 {
return m.Interfaces[0].Switch.UserSubnet.NetworkMaskLen
}
return m.Settings.MobileGateway.Interfaces[0].NetworkMaskLen
}
// NewMobileGatewayResolver DNS登録用パラメータ作成
func NewMobileGatewayResolver(dns1, dns2 string) *MobileGatewayResolver {
return &MobileGatewayResolver{
@ -175,7 +300,7 @@ type MobileGatewaySIMRoutes struct {
}
// AddSIMRoute SIMルート追加
func (m *MobileGatewaySIMRoutes) AddSIMRoute(simID int64, prefix string) bool {
func (m *MobileGatewaySIMRoutes) AddSIMRoute(simID int64, prefix string) (int, *MobileGatewaySIMRoute) {
var exists bool
for _, route := range m.SIMRoutes {
if route.ResourceID == fmt.Sprintf("%d", simID) && route.Prefix == prefix {
@ -184,12 +309,14 @@ func (m *MobileGatewaySIMRoutes) AddSIMRoute(simID int64, prefix string) bool {
}
}
if !exists {
m.SIMRoutes = append(m.SIMRoutes, &MobileGatewaySIMRoute{
r := &MobileGatewaySIMRoute{
ResourceID: fmt.Sprintf("%d", simID),
Prefix: prefix,
})
}
m.SIMRoutes = append(m.SIMRoutes, r)
return len(m.SIMRoutes) - 1, r
}
return !exists
return -1, nil
}
// DeleteSIMRoute SIMルート削除
@ -207,3 +334,79 @@ func (m *MobileGatewaySIMRoutes) DeleteSIMRoute(simID int64, prefix string) bool
m.SIMRoutes = routes
return exists
}
// DeleteSIMRouteAt SIMルート削除
func (m *MobileGatewaySIMRoutes) DeleteSIMRouteAt(index int) bool {
if m.SIMRoutes == nil {
return false
}
if index < len(m.SIMRoutes) {
s := m.SIMRoutes[index]
if simID, err := strconv.ParseInt(s.ResourceID, 10, 64); err == nil {
return m.DeleteSIMRoute(simID, s.Prefix)
}
}
return false
}
// FindSIMRoute SIMルート設定 検索
func (m *MobileGatewaySIMRoutes) FindSIMRoute(simID int64, prefix string) (int, *MobileGatewaySIMRoute) {
for i, r := range m.SIMRoutes {
if r.Prefix == prefix && r.ResourceID == fmt.Sprintf("%d", simID) {
return i, r
}
}
return -1, nil
}
// TrafficStatus トラフィックコントロール 当月通信量
type TrafficStatus struct {
UplinkBytes uint64 `json:"uplink_bytes,omitempty"`
DownlinkBytes uint64 `json:"downlink_bytes,omitempty"`
TrafficShaping bool `json:"traffic_shaping"` // 帯域制限
}
// UnmarshalJSON JSONアンマーシャル(uint64文字列対応)
func (s *TrafficStatus) UnmarshalJSON(data []byte) error {
tmp := &struct {
UplinkBytes string `json:"uplink_bytes,omitempty"`
DownlinkBytes string `json:"downlink_bytes,omitempty"`
TrafficShaping bool `json:"traffic_shaping"`
}{}
if err := json.Unmarshal(data, &tmp); err != nil {
return err
}
var err error
s.UplinkBytes, err = strconv.ParseUint(tmp.UplinkBytes, 10, 64)
if err != nil {
return err
}
s.DownlinkBytes, err = strconv.ParseUint(tmp.DownlinkBytes, 10, 64)
if err != nil {
return err
}
s.TrafficShaping = tmp.TrafficShaping
return nil
}
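
A minimal sketch of what the custom unmarshaller above accepts: the byte counters arrive as JSON strings and are parsed into uint64 fields. The payload values are made up.

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/sacloud/libsacloud/sacloud"
)

func main() {
	payload := []byte(`{"uplink_bytes":"10485760","downlink_bytes":"524288000","traffic_shaping":true}`)

	var status sacloud.TrafficStatus
	if err := json.Unmarshal(payload, &status); err != nil {
		log.Fatal(err)
	}
	fmt.Println(status.UplinkBytes, status.DownlinkBytes, status.TrafficShaping)
	// prints: 10485760 524288000 true
}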
// TrafficMonitoringConfig トラフィックコントロール 設定
type TrafficMonitoringConfig struct {
TrafficQuotaInMB int `json:"traffic_quota_in_mb"`
BandWidthLimitInKbps int `json:"bandwidth_limit_in_kbps"`
EMailConfig *TrafficMonitoringNotifyEmail `json:"email_config"`
SlackConfig *TrafficMonitoringNotifySlack `json:"slack_config"`
AutoTrafficShaping bool `json:"auto_traffic_shaping"`
}
// TrafficMonitoringNotifyEmail トラフィックコントロール通知設定
type TrafficMonitoringNotifyEmail struct {
Enabled bool `json:"enabled"` // 有効/無効
}
// TrafficMonitoringNotifySlack トラフィックコントロール通知設定
type TrafficMonitoringNotifySlack struct {
Enabled bool `json:"enabled"` // 有効/無効
IncomingWebhooksURL string `json:"slack_url,omitempty"` // Slack通知の場合のWebhook URL
}

View file

@ -9,23 +9,27 @@ import (
// MonitorValue アクティビティモニター
type MonitorValue struct {
CPUTime *float64 `json:"CPU-TIME,omitempty"` // CPU時間
Write *float64 `json:",omitempty"` // ディスク書き込み
Read *float64 `json:",omitempty"` // ディスク読み取り
Receive *float64 `json:",omitempty"` // パケット受信
Send *float64 `json:",omitempty"` // パケット送信
In *float64 `json:",omitempty"` // パケット受信
Out *float64 `json:",omitempty"` // パケット送信
TotalMemorySize *float64 `json:"Total-Memory-Size,omitempty"` // 総メモリサイズ
UsedMemorySize *float64 `json:"Used-Memory-Size,omitempty"` // 使用済みメモリサイズ
TotalDisk1Size *float64 `json:"Total-Disk1-Size,omitempty"` // 総ディスクサイズ
UsedDisk1Size *float64 `json:"Used-Disk1-Size,omitempty"` // 使用済みディスクサイズ
TotalDisk2Size *float64 `json:"Total-Disk2-Size,omitempty"` // 総ディスクサイズ
UsedDisk2Size *float64 `json:"Used-Disk2-Size,omitempty"` // 使用済みディスクサイズ
FreeDiskSize *float64 `json:"Free-Disk-Size,omitempty"` // 空きディスクサイズ(NFS)
ResponseTimeSec *float64 `json:"responsetimesec,omitempty"` // レスポンスタイム(シンプル監視)
UplinkBPS *float64 `json:"UplinkBps,omitempty"` // 上り方向トラフィック
DownlinkBPS *float64 `json:"DownlinkBps"` // 下り方向トラフィック
CPUTime *float64 `json:"CPU-TIME,omitempty"` // CPU時間
Write *float64 `json:",omitempty"` // ディスク書き込み
Read *float64 `json:",omitempty"` // ディスク読み取り
Receive *float64 `json:",omitempty"` // パケット受信
Send *float64 `json:",omitempty"` // パケット送信
In *float64 `json:",omitempty"` // パケット受信
Out *float64 `json:",omitempty"` // パケット送信
TotalMemorySize *float64 `json:"Total-Memory-Size,omitempty"` // 総メモリサイズ
UsedMemorySize *float64 `json:"Used-Memory-Size,omitempty"` // 使用済みメモリサイズ
TotalDisk1Size *float64 `json:"Total-Disk1-Size,omitempty"` // 総ディスクサイズ
UsedDisk1Size *float64 `json:"Used-Disk1-Size,omitempty"` // 使用済みディスクサイズ
TotalDisk2Size *float64 `json:"Total-Disk2-Size,omitempty"` // 総ディスクサイズ
UsedDisk2Size *float64 `json:"Used-Disk2-Size,omitempty"` // 使用済みディスクサイズ
BinlogUsedSizeKiB *float64 `json:"binlogUsedSizeKiB,omitempty"` // バイナリログのサイズ(レプリケーション有効時のみ、master/slave両方で利用可能)
DelayTimeSec *float64 `json:"delayTimeSec,omitempty"` // レプリケーション遅延時間(レプリケーション有効時のみ、slave側のみ)
FreeDiskSize *float64 `json:"Free-Disk-Size,omitempty"` // 空きディスクサイズ(NFS)
ResponseTimeSec *float64 `json:"responsetimesec,omitempty"` // レスポンスタイム(シンプル監視)
UplinkBPS *float64 `json:"UplinkBps,omitempty"` // 上り方向トラフィック
DownlinkBPS *float64 `json:"DownlinkBps,omitempty"` // 下り方向トラフィック
ActiveConnections *float64 `json:"activeConnections,omitempty"` // アクティブコネクション(プロキシLB)
ConnectionsPerSec *float64 `json:"connectionsPerSec,omitempty"` // 秒間コネクション数
}
// UnmarshalJSON JSONアンマーシャル(配列、オブジェクトが混在するためここで対応)
@ -36,23 +40,27 @@ func (m *MonitorValue) UnmarshalJSON(data []byte) error {
}
tmp := &struct {
CPUTime *float64 `json:"CPU-TIME,omitempty"`
Write *float64 `json:",omitempty"`
Read *float64 `json:",omitempty"`
Receive *float64 `json:",omitempty"`
Send *float64 `json:",omitempty"`
In *float64 `json:",omitempty"`
Out *float64 `json:",omitempty"`
TotalMemorySize *float64 `json:"Total-Memory-Size,omitempty"`
UsedMemorySize *float64 `json:"Used-Memory-Size,omitempty"`
TotalDisk1Size *float64 `json:"Total-Disk1-Size,omitempty"`
UsedDisk1Size *float64 `json:"Used-Disk1-Size,omitempty"`
TotalDisk2Size *float64 `json:"Total-Disk2-Size,omitempty"`
UsedDisk2Size *float64 `json:"Used-Disk2-Size,omitempty"`
FreeDiskSize *float64 `json:"Free-Disk-Size,omitempty"`
ResponseTimeSec *float64 `json:"responsetimesec,omitempty"`
UplinkBPS *float64 `json:"UplinkBps,omitempty"`
DownlinkBPS *float64 `json:"DownlinkBps"`
CPUTime *float64 `json:"CPU-TIME,omitempty"`
Write *float64 `json:",omitempty"`
Read *float64 `json:",omitempty"`
Receive *float64 `json:",omitempty"`
Send *float64 `json:",omitempty"`
In *float64 `json:",omitempty"`
Out *float64 `json:",omitempty"`
TotalMemorySize *float64 `json:"Total-Memory-Size,omitempty"`
UsedMemorySize *float64 `json:"Used-Memory-Size,omitempty"`
TotalDisk1Size *float64 `json:"Total-Disk1-Size,omitempty"`
UsedDisk1Size *float64 `json:"Used-Disk1-Size,omitempty"`
TotalDisk2Size *float64 `json:"Total-Disk2-Size,omitempty"`
UsedDisk2Size *float64 `json:"Used-Disk2-Size,omitempty"`
BinlogUsedSizeKiB *float64 `json:"binlogUsedSizeKiB,omitempty"`
DelayTimeSec *float64 `json:"delayTimeSec,omitempty"`
FreeDiskSize *float64 `json:"Free-Disk-Size,omitempty"`
ResponseTimeSec *float64 `json:"responsetimesec,omitempty"`
UplinkBPS *float64 `json:"UplinkBps,omitempty"`
DownlinkBPS *float64 `json:"DownlinkBps,omitempty"`
ActiveConnections *float64 `json:"activeConnections,omitempty"`
ConnectionsPerSec *float64 `json:"connectionsPerSec,omitempty"`
}{}
if err := json.Unmarshal(data, &tmp); err != nil {
return err
@ -71,10 +79,14 @@ func (m *MonitorValue) UnmarshalJSON(data []byte) error {
m.UsedDisk1Size = tmp.UsedDisk1Size
m.TotalDisk2Size = tmp.TotalDisk2Size
m.UsedDisk2Size = tmp.UsedDisk2Size
m.BinlogUsedSizeKiB = tmp.BinlogUsedSizeKiB
m.DelayTimeSec = tmp.DelayTimeSec
m.FreeDiskSize = tmp.FreeDiskSize
m.ResponseTimeSec = tmp.ResponseTimeSec
m.UplinkBPS = tmp.UplinkBPS
m.DownlinkBPS = tmp.DownlinkBPS
m.ActiveConnections = tmp.ActiveConnections
m.ConnectionsPerSec = tmp.ConnectionsPerSec
return nil
}
@ -104,6 +116,23 @@ type ResourceMonitorResponse struct {
Data *MonitorValues `json:",omitempty"` // メトリクス
}
// UnmarshalJSON JSONアンマーシャル(配列、オブジェクトが混在するためここで対応)
func (m *MonitorValues) UnmarshalJSON(data []byte) error {
targetData := strings.Replace(strings.Replace(string(data), " ", "", -1), "\n", "", -1)
if targetData == `[]` {
return nil
}
tmp := map[string]*MonitorValue{}
if err := json.Unmarshal(data, &tmp); err != nil {
return err
}
value := MonitorValues(tmp)
*m = value
return nil
}
// MonitorSummaryData メトリクスサマリー
type MonitorSummaryData struct {
Max float64 // 最大値
@ -242,6 +271,16 @@ func (m *MonitorValues) FlattenUsedDisk2SizeValue() ([]FlatMonitorValue, error)
return m.flattenValue(func(v *MonitorValue) *float64 { return v.UsedDisk2Size })
}
// FlattenBinlogUsedSizeKiBValue フラット化 バイナリログサイズ
func (m *MonitorValues) FlattenBinlogUsedSizeKiBValue() ([]FlatMonitorValue, error) {
return m.flattenValue(func(v *MonitorValue) *float64 { return v.BinlogUsedSizeKiB })
}
// FlattenDelayTimeSecValue フラット化 レプリケーション遅延時間
func (m *MonitorValues) FlattenDelayTimeSecValue() ([]FlatMonitorValue, error) {
return m.flattenValue(func(v *MonitorValue) *float64 { return v.DelayTimeSec })
}
// FlattenFreeDiskSizeValue フラット化 空きディスクサイズ(NFS)
func (m *MonitorValues) FlattenFreeDiskSizeValue() ([]FlatMonitorValue, error) {
return m.flattenValue(func(v *MonitorValue) *float64 { return v.FreeDiskSize })
@ -262,6 +301,16 @@ func (m *MonitorValues) FlattenDownlinkBPSValue() ([]FlatMonitorValue, error) {
return m.flattenValue(func(v *MonitorValue) *float64 { return v.DownlinkBPS })
}
// FlattenActiveConnections フラット化 アクティブコネクション
func (m *MonitorValues) FlattenActiveConnections() ([]FlatMonitorValue, error) {
return m.flattenValue(func(v *MonitorValue) *float64 { return v.ActiveConnections })
}
// FlattenConnectionsPerSec フラット化 秒間接続数
func (m *MonitorValues) FlattenConnectionsPerSec() ([]FlatMonitorValue, error) {
return m.flattenValue(func(v *MonitorValue) *float64 { return v.ConnectionsPerSec })
}
func (m *MonitorValues) flattenValue(f func(*MonitorValue) *float64) ([]FlatMonitorValue, error) {
var res []FlatMonitorValue
@ -293,8 +342,10 @@ func (m *MonitorValue) HasValue() bool {
m.TotalMemorySize, m.UsedMemorySize,
m.TotalDisk1Size, m.UsedDisk1Size,
m.TotalDisk2Size, m.UsedDisk2Size,
m.BinlogUsedSizeKiB, m.DelayTimeSec,
m.FreeDiskSize, m.ResponseTimeSec,
m.UplinkBPS, m.DownlinkBPS,
m.ActiveConnections, m.ConnectionsPerSec,
}
for _, v := range values {
if v != nil {

View file

@ -1,5 +1,9 @@
package sacloud
import (
"encoding/json"
)
// NFS NFS
type NFS struct {
*Appliance // アプライアンス共通属性
@ -11,47 +15,97 @@ type NFS struct {
// NFSRemark リマーク
type NFSRemark struct {
*ApplianceRemarkBase
propPlanID
Plan *struct {
ID json.Number `json:",omitempty"`
} `json:",omitempty"` // プラン
// TODO Zone
//Zone *Resource
//SourceAppliance *Resource // クローン元DB
}
// SetRemarkPlanID プランID設定
func (n NFSRemark) SetRemarkPlanID(planID int64) {
if n.Plan == nil {
n.Plan = &struct {
ID json.Number `json:",omitempty"`
}{}
}
n.Plan.ID = json.Number(fmt.Sprintf("%d", planID))
}
// NFSSettings NFS設定リスト
type NFSSettings struct {
}
// NFSPlan NFSプラン
// NFSPlan プラン(HDD/SSD)
type NFSPlan int
var (
// NFSPlan100G 100Gプラン
NFSPlan100G = NFSPlan(100)
// NFSPlan500G 500Gプラン
NFSPlan500G = NFSPlan(500)
// NFSPlan1T 1T(1024GB)プラン
NFSPlan1T = NFSPlan(1024 * 1)
// NFSPlan2T 2T(2048GB)プラン
NFSPlan2T = NFSPlan(1024 * 2)
// NFSPlan4T 4T(4096GB)プラン
NFSPlan4T = NFSPlan(1024 * 4)
// NFSPlanHDD 標準プラン(HDD)
NFSPlanHDD = NFSPlan(1)
// NFSPlanSSD SSDプラン
NFSPlanSSD = NFSPlan(2)
)
// AllowNFSPlans 指定可能なNFSプラン
func AllowNFSPlans() []int {
// String NFSプランの文字列表現
func (p NFSPlan) String() string {
switch p {
case NFSPlanHDD:
return "HDD"
case NFSPlanSSD:
return "SSD"
default:
return ""
}
}
// NFSSize NFSサイズ
type NFSSize int
var (
// NFSSize100G 100Gプラン
NFSSize100G = NFSSize(100)
// NFSSize500G 500Gプラン
NFSSize500G = NFSSize(500)
// NFSSize1T 1T(1024GB)プラン
NFSSize1T = NFSSize(1024 * 1)
// NFSSize2T 2T(2048GB)プラン
NFSSize2T = NFSSize(1024 * 2)
// NFSSize4T 4T(4096GB)プラン
NFSSize4T = NFSSize(1024 * 4)
// NFSSize8T 8TBプラン
NFSSize8T = NFSSize(1024 * 8)
// NFSSize12T 12TBプラン
NFSSize12T = NFSSize(1024 * 12)
)
// AllowNFSNormalPlanSizes 指定可能なNFSサイズ(標準プラン)
func AllowNFSNormalPlanSizes() []int {
return []int{
int(NFSPlan100G),
int(NFSPlan500G),
int(NFSPlan1T),
int(NFSPlan2T),
int(NFSPlan4T),
int(NFSSize100G),
int(NFSSize500G),
int(NFSSize1T),
int(NFSSize2T),
int(NFSSize4T),
int(NFSSize8T),
int(NFSSize12T),
}
}
// AllowNFSSSDPlanSizes 指定可能なNFSサイズ(SSDプラン)
func AllowNFSSSDPlanSizes() []int {
return []int{
int(NFSSize100G),
int(NFSSize500G),
int(NFSSize1T),
int(NFSSize2T),
int(NFSSize4T),
}
}
// CreateNFSValue NFS作成用パラメーター
type CreateNFSValue struct {
SwitchID string // 接続先スイッチID
Plan NFSPlan // プラン
IPAddress string // IPアドレス
MaskLen int // ネットワークマスク長
DefaultRoute string // デフォルトルート
@ -62,27 +116,16 @@ type CreateNFSValue struct {
SourceAppliance *Resource // クローン元NFS
}
// NewCreateNFSValue NFS作成用パラメーター
func NewCreateNFSValue() *CreateNFSValue {
return &CreateNFSValue{
Plan: NFSPlan100G,
}
}
// NewNFS NFS作成(冗長化なし)
func NewNFS(values *CreateNFSValue) *NFS {
if int(values.Plan) == 0 {
values.Plan = NFSPlan100G
}
return &NFS{
Appliance: &Appliance{
Class: "nfs",
propName: propName{Name: values.Name},
propDescription: propDescription{Description: values.Description},
propTags: propTags{Tags: values.Tags},
propPlanID: propPlanID{Plan: &Resource{ID: int64(values.Plan)}},
//propPlanID: propPlanID{Plan: &Resource{ID: int64(values.Plan)}},
propIcon: propIcon{
&Icon{
Resource: values.Icon,
@ -99,12 +142,107 @@ func NewNFS(values *CreateNFSValue) *NFS {
DefaultRoute: values.DefaultRoute,
},
Servers: []interface{}{
map[string]string{"IPAddress": values.IPAddress},
map[string]interface{}{"IPAddress": values.IPAddress},
},
},
propPlanID: propPlanID{Plan: &Resource{ID: int64(values.Plan)}},
//SourceAppliance: values.SourceAppliance,
//propPlanID: propPlanID{Plan: &Resource{ID: int64(values.Plan)}},
},
}
}
// IPAddress IPアドレスを取得
func (n *NFS) IPAddress() string {
if len(n.Remark.Servers) < 1 {
return ""
}
v, ok := n.Remark.Servers[0].(map[string]interface{})
if !ok {
return ""
}
if ip, ok := v["IPAddress"]; ok {
return ip.(string)
}
return ""
}
// NetworkMaskLen ネットワークマスク長を取得
func (n *NFS) NetworkMaskLen() int {
if n.Remark.Network == nil {
return -1
}
return n.Remark.Network.NetworkMaskLen
}
// DefaultRoute デフォルトゲートウェイを取得
func (n *NFS) DefaultRoute() string {
if n.Remark.Network == nil {
return ""
}
return n.Remark.Network.DefaultRoute
}
// NFSPlans NFSプラン
type NFSPlans struct {
HDD []NFSPlanValue
SSD []NFSPlanValue
}
// FindPlanID プランとサイズからプランIDを取得
func (p NFSPlans) FindPlanID(plan NFSPlan, size NFSSize) int64 {
var plans []NFSPlanValue
switch plan {
case NFSPlanHDD:
plans = p.HDD
case NFSPlanSSD:
plans = p.SSD
default:
return -1
}
for _, plan := range plans {
if plan.Availability == "available" && plan.Size == int(size) {
res, err := plan.PlanID.Int64()
if err != nil {
return -1
}
return res
}
}
return -1
}
// FindByPlanID プランIDから該当プランを取得
func (p NFSPlans) FindByPlanID(planID int64) (NFSPlan, *NFSPlanValue) {
for _, plan := range p.SSD {
id, err := plan.PlanID.Int64()
if err != nil {
continue
}
if id == planID {
return NFSPlanSSD, &plan
}
}
for _, plan := range p.HDD {
id, err := plan.PlanID.Int64()
if err != nil {
continue
}
if id == planID {
return NFSPlanHDD, &plan
}
}
return NFSPlan(-1), nil
}
// NFSPlanValue NFSプラン
type NFSPlanValue struct {
Size int `json:"size"`
Availability string `json:"availability"`
PlanID json.Number `json:"planId"`
}
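
A minimal sketch of mapping a plan/size pair to a concrete plan ID with the helpers above; the NFSPlans value itself would come from the NFS plan API, which is outside this diff.

package example

import (
	"fmt"

	"github.com/sacloud/libsacloud/sacloud"
)

// resolveSSDPlanID picks the plan ID for an SSD NFS appliance of the given size.
func resolveSSDPlanID(plans sacloud.NFSPlans, size sacloud.NFSSize) (int64, error) {
	id := plans.FindPlanID(sacloud.NFSPlanSSD, size)
	if id < 0 {
		return 0, fmt.Errorf("no available SSD plan for %dGB", int(size))
	}
	return id, nil
}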

View file

@ -27,6 +27,10 @@ const (
SophosUTM
// FreeBSD OS種別:FreeBSD
FreeBSD
// Netwiser OS種別: Netwiser Virtual Edition
Netwiser
// OPNsense OS種別: OPNsense
OPNsense
// Windows2012 OS種別:Windows Server 2012 R2 Datacenter Edition
Windows2012
// Windows2012RDS OS種別:Windows Server 2012 R2 for RDS
@ -41,10 +45,16 @@ const (
Windows2016RDSOffice
// Windows2016SQLServerWeb OS種別:Windows Server 2016 SQLServer(Web)
Windows2016SQLServerWeb
// Windows2016SQLServerStandard OS種別:Windows Server 2016 SQLServer(Standard)
// Windows2016SQLServerStandard OS種別:Windows Server 2016 SQLServer 2016(Standard)
Windows2016SQLServerStandard
// Windows2016SQLServer2017Standard OS種別:Windows Server 2016 SQLServer 2017(Standard)
Windows2016SQLServer2017Standard
// Windows2016SQLServerStandardAll OS種別:Windows Server 2016 SQLServer(Standard) + RDS + Office
Windows2016SQLServerStandardAll
// Windows2016SQLServer2017StandardAll OS種別:Windows Server 2016 SQLServer 2017(Standard) + RDS + Office
Windows2016SQLServer2017StandardAll
// Windows2019 OS種別:Windows Server 2019 Datacenter Edition
Windows2019
// Custom OS種別:カスタム
Custom
)
@ -53,9 +63,12 @@ const (
var OSTypeShortNames = []string{
"centos", "centos6", "ubuntu", "debian", "vyos", "coreos",
"rancheros", "kusanagi", "sophos-utm", "freebsd",
"netwiser", "opnsense",
"windows2012", "windows2012-rds", "windows2012-rds-office",
"windows2016", "windows2016-rds", "windows2016-rds-office",
"windows2016-sql-web", "windows2016-sql-standard", "windows2016-sql-standard-all",
"windows2016-sql2017-standard", "windows2016-sql2017-standard-all",
"windows2019",
}
// IsWindows Windowsか
@ -63,7 +76,9 @@ func (o ArchiveOSTypes) IsWindows() bool {
switch o {
case Windows2012, Windows2012RDS, Windows2012RDSOffice,
Windows2016, Windows2016RDS, Windows2016RDSOffice,
Windows2016SQLServerWeb, Windows2016SQLServerStandard, Windows2016SQLServerStandardAll:
Windows2016SQLServerWeb, Windows2016SQLServerStandard, Windows2016SQLServerStandardAll,
Windows2016SQLServer2017Standard, Windows2016SQLServer2017StandardAll,
Windows2019:
return true
default:
return false
@ -103,6 +118,10 @@ func StrToOSType(osType string) ArchiveOSTypes {
return SophosUTM
case "freebsd":
return FreeBSD
case "netwiser":
return Netwiser
case "opnsense":
return OPNsense
case "windows2012":
return Windows2012
case "windows2012-rds":
@ -119,8 +138,14 @@ func StrToOSType(osType string) ArchiveOSTypes {
return Windows2016SQLServerWeb
case "windows2016-sql-standard":
return Windows2016SQLServerStandard
case "windows2016-sql2017-standard":
return Windows2016SQLServer2017Standard
case "windows2016-sql-standard-all":
return Windows2016SQLServerStandardAll
case "windows2016-sql2017-standard-all":
return Windows2016SQLServer2017StandardAll
case "windows2019":
return Windows2019
default:
return Custom
}

View file

@ -4,9 +4,9 @@ package ostype
import "strconv"
const _ArchiveOSTypes_name = "CentOSCentOS6UbuntuDebianVyOSCoreOSRancherOSKusanagiSophosUTMFreeBSDWindows2012Windows2012RDSWindows2012RDSOfficeWindows2016Windows2016RDSWindows2016RDSOfficeWindows2016SQLServerWebWindows2016SQLServerStandardWindows2016SQLServerStandardAllCustom"
const _ArchiveOSTypes_name = "CentOSCentOS6UbuntuDebianVyOSCoreOSRancherOSKusanagiSophosUTMFreeBSDNetwiserOPNsenseWindows2012Windows2012RDSWindows2012RDSOfficeWindows2016Windows2016RDSWindows2016RDSOfficeWindows2016SQLServerWebWindows2016SQLServerStandardWindows2016SQLServer2017StandardWindows2016SQLServerStandardAllWindows2016SQLServer2017StandardAllWindows2019Custom"
var _ArchiveOSTypes_index = [...]uint8{0, 6, 13, 19, 25, 29, 35, 44, 52, 61, 68, 79, 93, 113, 124, 138, 158, 181, 209, 240, 246}
var _ArchiveOSTypes_index = [...]uint16{0, 6, 13, 19, 25, 29, 35, 44, 52, 61, 68, 76, 84, 95, 109, 129, 140, 154, 174, 197, 225, 257, 288, 323, 334, 340}
func (i ArchiveOSTypes) String() string {
if i < 0 || i >= ArchiveOSTypes(len(_ArchiveOSTypes_index)-1) {

View file

@ -2,11 +2,12 @@ package sacloud
// ProductServer サーバープラン
type ProductServer struct {
*Resource // ID
propName // 名称
propDescription // 説明
propAvailability // 有功状態
propCPU // CPUコア数
propMemoryMB // メモリサイズ(MB単位)
propServiceClass // サービスクラス
*Resource // ID
propName // 名称
propDescription // 説明
propAvailability // 有功状態
propCPU // CPUコア数
propMemoryMB // メモリサイズ(MB単位)
propServiceClass // サービスクラス
Generation PlanGenerations `json:",omitempty"` // 世代
}

View file

@ -17,3 +17,8 @@ func (p *propMemoryMB) GetMemoryGB() int {
}
return p.MemoryMB / 1024
}
// SetMemoryGB サイズ(GB単位) 設定
func (p *propMemoryMB) SetMemoryGB(memoryGB int) {
p.MemoryMB = memoryGB * 1024
}

View file

@ -23,6 +23,15 @@ func (p *propServerPlan) SetServerPlanByID(planID string) {
p.ServerPlan.Resource = NewResourceByStringID(planID)
}
// SetServerPlanByValue サーバープラン設定(値指定)
func (p *propServerPlan) SetServerPlanByValue(cpu int, memoryGB int, gen PlanGenerations) {
plan := &ProductServer{}
plan.CPU = cpu
plan.SetMemoryGB(memoryGB)
plan.Generation = gen
p.ServerPlan = plan
}
// GetCPU CPUコア数 取得
func (p *propServerPlan) GetCPU() int {
if p.ServerPlan == nil {
@ -49,3 +58,7 @@ func (p *propServerPlan) GetMemoryGB() int {
return p.ServerPlan.GetMemoryGB()
}
func (p *propServerPlan) SetMemoryGB(memoryGB int) {
p.ServerPlan.SetMemoryGB(memoryGB)
}

View file

@ -0,0 +1,16 @@
package sacloud
// propWaitDiskMigration ディスク作成待ちフラグ内包型
type propWaitDiskMigration struct {
WaitDiskMigration bool `json:",omitempty"`
}
// GetWaitDiskMigration ディスク作成待ちフラグ 取得
func (p *propWaitDiskMigration) GetWaitDiskMigration() bool {
return p.WaitDiskMigration
}
// SetWaitDiskMigration ディスク作成待ちフラグ 設定
func (p *propWaitDiskMigration) SetWaitDiskMigration(f bool) {
p.WaitDiskMigration = f
}

517
vendor/github.com/sacloud/libsacloud/sacloud/proxylb.go generated vendored Normal file
View file

@ -0,0 +1,517 @@
package sacloud
import (
"crypto/x509"
"encoding/json"
"encoding/pem"
"fmt"
"strconv"
"strings"
"time"
)
// ProxyLB ProxyLB(CommonServiceItem)
type ProxyLB struct {
*Resource // ID
propName // 名称
propDescription // 説明
propServiceClass // サービスクラス
propIcon // アイコン
propTags // タグ
propCreatedAt // 作成日時
propModifiedAt // 変更日時
propAvailability // 有効状態
Status *ProxyLBStatus `json:",omitempty"` // ステータス
Provider ProxyLBProvider `json:",omitempty"` // プロバイダ
Settings ProxyLBSettings `json:",omitempty"` // ProxyLB設定
}
// ProxyLBSettings ProxyLB設定
type ProxyLBSettings struct {
ProxyLB ProxyLBSetting `json:",omitempty"` // ProxyLB ProxyLBエントリー
}
// ProxyLBStatus ProxyLBステータス
type ProxyLBStatus struct {
FQDN string `json:",omitempty"` // 割り当てられたFQDN(site-*******.proxylb?.sakura.ne.jp) UseVIPFailoverがtrueの場合のみ有効
VirtualIPAddress string `json:",omitempty"` // 割り当てられたVIP UseVIPFailoverがfalseの場合のみ有効
ProxyNetworks []string `json:",omitempty"` // プロキシ元ネットワークアドレス(CIDR)
UseVIPFailover bool // VIPフェイルオーバ
}
// ProxyLBProvider プロバイダ
type ProxyLBProvider struct {
Class string `json:",omitempty"` // クラス
}
// CreateNewProxyLB ProxyLB作成
func CreateNewProxyLB(name string) *ProxyLB {
return &ProxyLB{
Resource: &Resource{},
propName: propName{Name: name},
Provider: ProxyLBProvider{
Class: "proxylb",
},
Settings: ProxyLBSettings{
ProxyLB: ProxyLBSetting{
HealthCheck: defaultProxyLBHealthCheck,
SorryServer: ProxyLBSorryServer{},
Servers: []ProxyLBServer{},
},
},
}
}
// ProxyLBPlan ProxyLB plan
type ProxyLBPlan int
var (
// ProxyLBPlan1000 1,000 cps plan
ProxyLBPlan1000 = ProxyLBPlan(1000)
// ProxyLBPlan5000 5,000 cps plan
ProxyLBPlan5000 = ProxyLBPlan(5000)
// ProxyLBPlan10000 10,000 cps plan
ProxyLBPlan10000 = ProxyLBPlan(10000)
// ProxyLBPlan50000 50,000 cps plan
ProxyLBPlan50000 = ProxyLBPlan(50000)
// ProxyLBPlan100000 100,000 cps plan
ProxyLBPlan100000 = ProxyLBPlan(100000)
)
// AllowProxyLBPlans list of valid plan IDs
var AllowProxyLBPlans = []int{
int(ProxyLBPlan1000),
int(ProxyLBPlan5000),
int(ProxyLBPlan10000),
int(ProxyLBPlan50000),
int(ProxyLBPlan100000),
}
// GetPlan returns the plan (default: 1,000 cps)
func (p *ProxyLB) GetPlan() ProxyLBPlan {
classes := strings.Split(p.ServiceClass, "/")
class, err := strconv.Atoi(classes[len(classes)-1])
if err != nil {
return ProxyLBPlan1000
}
return ProxyLBPlan(class)
}
// SetPlan プラン指定
func (p *ProxyLB) SetPlan(plan ProxyLBPlan) {
p.ServiceClass = fmt.Sprintf("cloud/proxylb/plain/%d", plan)
}
// SetHTTPHealthCheck HTTPヘルスチェック 設定
func (p *ProxyLB) SetHTTPHealthCheck(hostHeader, path string, delayLoop int) {
if delayLoop <= 0 {
delayLoop = 10
}
p.Settings.ProxyLB.HealthCheck.Protocol = "http"
p.Settings.ProxyLB.HealthCheck.Host = hostHeader
p.Settings.ProxyLB.HealthCheck.Path = path
p.Settings.ProxyLB.HealthCheck.DelayLoop = delayLoop
}
// SetTCPHealthCheck TCPヘルスチェック 設定
func (p *ProxyLB) SetTCPHealthCheck(delayLoop int) {
if delayLoop <= 0 {
delayLoop = 10
}
p.Settings.ProxyLB.HealthCheck.Protocol = "tcp"
p.Settings.ProxyLB.HealthCheck.Host = ""
p.Settings.ProxyLB.HealthCheck.Path = ""
p.Settings.ProxyLB.HealthCheck.DelayLoop = delayLoop
}
// SetSorryServer ソーリーサーバ 設定
func (p *ProxyLB) SetSorryServer(ipaddress string, port int) {
var pt *int
if port > 0 {
pt = &port
}
p.Settings.ProxyLB.SorryServer = ProxyLBSorryServer{
IPAddress: ipaddress,
Port: pt,
}
}
// ClearSorryServer ソーリーサーバ クリア
func (p *ProxyLB) ClearSorryServer() {
p.SetSorryServer("", 0)
}
// HasProxyLBServer ProxyLB配下にサーバーを保持しているか判定
func (p *ProxyLB) HasProxyLBServer() bool {
return len(p.Settings.ProxyLB.Servers) > 0
}
// ClearProxyLBServer ProxyLB配下のサーバーをクリア
func (p *ProxyLB) ClearProxyLBServer() {
p.Settings.ProxyLB.Servers = []ProxyLBServer{}
}
// AddBindPort バインドポート追加
func (p *ProxyLB) AddBindPort(mode string, port int) {
p.Settings.ProxyLB.AddBindPort(mode, port)
}
// DeleteBindPort バインドポート削除
func (p *ProxyLB) DeleteBindPort(mode string, port int) {
p.Settings.ProxyLB.DeleteBindPort(mode, port)
}
// ClearBindPorts バインドポート クリア
func (p *ProxyLB) ClearBindPorts() {
p.Settings.ProxyLB.BindPorts = []*ProxyLBBindPorts{}
}
// AddServer ProxyLB配下のサーバーを追加
func (p *ProxyLB) AddServer(ip string, port int, enabled bool) {
p.Settings.ProxyLB.AddServer(ip, port, enabled)
}
// DeleteServer ProxyLB配下のサーバーを削除
func (p *ProxyLB) DeleteServer(ip string, port int) {
p.Settings.ProxyLB.DeleteServer(ip, port)
}
// ProxyLBSetting ProxyLBセッティング
type ProxyLBSetting struct {
HealthCheck ProxyLBHealthCheck `json:",omitempty"` // ヘルスチェック
SorryServer ProxyLBSorryServer `json:",omitempty"` // ソーリーサーバー
BindPorts []*ProxyLBBindPorts `json:",omitempty"` // プロキシ方式(プロトコル&ポート)
Servers []ProxyLBServer `json:",omitempty"` // サーバー
}
// ProxyLBSorryServer ソーリーサーバ
type ProxyLBSorryServer struct {
IPAddress string // IPアドレス
Port *int // ポート
}
// AddBindPort バインドポート追加
func (s *ProxyLBSetting) AddBindPort(mode string, port int) {
var isExist bool
for i := range s.BindPorts {
if s.BindPorts[i].ProxyMode == mode && s.BindPorts[i].Port == port {
isExist = true
}
}
if !isExist {
s.BindPorts = append(s.BindPorts, &ProxyLBBindPorts{
ProxyMode: mode,
Port: port,
})
}
}
// DeleteBindPort バインドポート削除
func (s *ProxyLBSetting) DeleteBindPort(mode string, port int) {
var res []*ProxyLBBindPorts
for i := range s.BindPorts {
if s.BindPorts[i].ProxyMode != mode || s.BindPorts[i].Port != port {
res = append(res, s.BindPorts[i])
}
}
s.BindPorts = res
}
// AddServer ProxyLB配下のサーバーを追加
func (s *ProxyLBSetting) AddServer(ip string, port int, enabled bool) {
var record ProxyLBServer
var isExist = false
for i := range s.Servers {
if s.Servers[i].IPAddress == ip && s.Servers[i].Port == port {
isExist = true
s.Servers[i].Enabled = enabled
}
}
if !isExist {
record = ProxyLBServer{
IPAddress: ip,
Port: port,
Enabled: enabled,
}
s.Servers = append(s.Servers, record)
}
}
// DeleteServer ProxyLB配下のサーバーを削除
func (s *ProxyLBSetting) DeleteServer(ip string, port int) {
var res []ProxyLBServer
for i := range s.Servers {
if s.Servers[i].IPAddress != ip || s.Servers[i].Port != port {
res = append(res, s.Servers[i])
}
}
s.Servers = res
}
// AllowProxyLBBindModes プロキシ方式
var AllowProxyLBBindModes = []string{"http", "https"}
// ProxyLBBindPorts プロキシ方式
type ProxyLBBindPorts struct {
ProxyMode string `json:",omitempty"` // モード(プロトコル)
Port int `json:",omitempty"` // ポート
}
// ProxyLBServer ProxyLB配下のサーバー
type ProxyLBServer struct {
IPAddress string `json:",omitempty"` // IPアドレス
Port int `json:",omitempty"` // ポート
Enabled bool `json:",omitempty"` // 有効/無効
}
// NewProxyLBServer ProxyLB配下のサーバ作成
func NewProxyLBServer(ipaddress string, port int) *ProxyLBServer {
return &ProxyLBServer{
IPAddress: ipaddress,
Port: port,
Enabled: true,
}
}
// AllowProxyLBHealthCheckProtocols プロキシLBで利用できるヘルスチェックプロトコル
var AllowProxyLBHealthCheckProtocols = []string{"http", "tcp"}
// ProxyLBHealthCheck ヘルスチェック
type ProxyLBHealthCheck struct {
Protocol string `json:",omitempty"` // プロトコル
Host string `json:",omitempty"` // 対象ホスト
Path string `json:",omitempty"` // HTTPの場合のリクエストパス
DelayLoop int `json:",omitempty"` // 監視間隔
}
var defaultProxyLBHealthCheck = ProxyLBHealthCheck{
Protocol: "http",
Host: "",
Path: "/",
DelayLoop: 10,
}
// ProxyLBAdditionalCerts additional certificates
type ProxyLBAdditionalCerts []*ProxyLBCertificate
// ProxyLBCertificates ProxyLBのSSL証明書
type ProxyLBCertificates struct {
ServerCertificate string // サーバ証明書
IntermediateCertificate string // 中間証明書
PrivateKey string // 秘密鍵
CertificateEndDate time.Time `json:",omitempty"` // 有効期限
CertificateCommonName string `json:",omitempty"` // CommonName
AdditionalCerts ProxyLBAdditionalCerts
}
// UnmarshalJSON UnmarshalJSON(AdditionalCertsが空の場合に空文字を返す問題への対応)
func (p *ProxyLBAdditionalCerts) UnmarshalJSON(data []byte) error {
targetData := strings.Replace(strings.Replace(string(data), " ", "", -1), "\n", "", -1)
if targetData == `""` {
return nil
}
var certs []*ProxyLBCertificate
if err := json.Unmarshal(data, &certs); err != nil {
return err
}
*p = certs
return nil
}
// SetPrimaryCert PrimaryCertを設定
func (p *ProxyLBCertificates) SetPrimaryCert(cert *ProxyLBCertificate) {
p.ServerCertificate = cert.ServerCertificate
p.IntermediateCertificate = cert.IntermediateCertificate
p.PrivateKey = cert.PrivateKey
p.CertificateEndDate = cert.CertificateEndDate
p.CertificateCommonName = cert.CertificateCommonName
}
// SetPrimaryCertValue PrimaryCertを設定
func (p *ProxyLBCertificates) SetPrimaryCertValue(serverCert, intermediateCert, privateKey string) {
p.ServerCertificate = serverCert
p.IntermediateCertificate = intermediateCert
p.PrivateKey = privateKey
}
// AddAdditionalCert AdditionalCertを追加
func (p *ProxyLBCertificates) AddAdditionalCert(serverCert, intermediateCert, privateKey string) {
p.AdditionalCerts = append(p.AdditionalCerts, &ProxyLBCertificate{
ServerCertificate: serverCert,
IntermediateCertificate: intermediateCert,
PrivateKey: privateKey,
})
}
// RemoveAdditionalCertAt 指定のインデックスを持つAdditionalCertを削除
func (p *ProxyLBCertificates) RemoveAdditionalCertAt(index int) {
var certs []*ProxyLBCertificate
for i, cert := range p.AdditionalCerts {
if i != index {
certs = append(certs, cert)
}
}
p.AdditionalCerts = certs
}
// RemoveAdditionalCert 指定の内容を持つAdditionalCertを削除
func (p *ProxyLBCertificates) RemoveAdditionalCert(serverCert, intermediateCert, privateKey string) {
var certs []*ProxyLBCertificate
for _, cert := range p.AdditionalCerts {
if !(cert.ServerCertificate == serverCert && cert.IntermediateCertificate == intermediateCert && cert.PrivateKey == privateKey) {
certs = append(certs, cert)
}
}
p.AdditionalCerts = certs
}
// RemoveAdditionalCerts AdditionalCertsを全て削除
func (p *ProxyLBCertificates) RemoveAdditionalCerts() {
p.AdditionalCerts = []*ProxyLBCertificate{}
}
// UnmarshalJSON UnmarshalJSON(CertificateEndDateのtime.TimeへのUnmarshal対応)
func (p *ProxyLBCertificates) UnmarshalJSON(data []byte) error {
var tmp map[string]interface{}
if err := json.Unmarshal(data, &tmp); err != nil {
return err
}
p.ServerCertificate = tmp["ServerCertificate"].(string)
p.IntermediateCertificate = tmp["IntermediateCertificate"].(string)
p.PrivateKey = tmp["PrivateKey"].(string)
p.CertificateCommonName = tmp["CertificateCommonName"].(string)
endDate := tmp["CertificateEndDate"].(string)
if endDate != "" {
date, err := time.Parse("Jan _2 15:04:05 2006 MST", endDate)
if err != nil {
return err
}
p.CertificateEndDate = date
}
if _, ok := tmp["AdditionalCerts"].(string); !ok {
rawCerts, err := json.Marshal(tmp["AdditionalCerts"])
if err != nil {
return err
}
var additionalCerts ProxyLBAdditionalCerts
if err := json.Unmarshal(rawCerts, &additionalCerts); err != nil {
return err
}
p.AdditionalCerts = additionalCerts
}
return nil
}
// ParseServerCertificate サーバ証明書のパース
func (p *ProxyLBCertificates) ParseServerCertificate() (*x509.Certificate, error) {
cert, e := p.parseCertificate(p.ServerCertificate)
if e != nil {
return nil, e
}
return cert, nil
}
// ParseIntermediateCertificate 中間証明書のパース
func (p *ProxyLBCertificates) ParseIntermediateCertificate() (*x509.Certificate, error) {
cert, e := p.parseCertificate(p.IntermediateCertificate)
if e != nil {
return nil, e
}
return cert, nil
}
func (p *ProxyLBCertificates) parseCertificate(certPEM string) (*x509.Certificate, error) {
block, _ := pem.Decode([]byte(certPEM))
if block != nil {
return x509.ParseCertificate(block.Bytes)
}
return nil, fmt.Errorf("can't decode certificate")
}
// ProxyLBCertificate ProxyLBのSSL証明書詳細
type ProxyLBCertificate struct {
ServerCertificate string // サーバ証明書
IntermediateCertificate string // 中間証明書
PrivateKey string // 秘密鍵
CertificateEndDate time.Time `json:",omitempty"` // 有効期限
CertificateCommonName string `json:",omitempty"` // CommonName
}
// UnmarshalJSON UnmarshalJSON(CertificateEndDateのtime.TimeへのUnmarshal対応)
func (p *ProxyLBCertificate) UnmarshalJSON(data []byte) error {
var tmp map[string]interface{}
if err := json.Unmarshal(data, &tmp); err != nil {
return err
}
p.ServerCertificate = tmp["ServerCertificate"].(string)
p.IntermediateCertificate = tmp["IntermediateCertificate"].(string)
p.PrivateKey = tmp["PrivateKey"].(string)
p.CertificateCommonName = tmp["CertificateCommonName"].(string)
endDate := tmp["CertificateEndDate"].(string)
if endDate != "" {
date, err := time.Parse("Jan _2 15:04:05 2006 MST", endDate)
if err != nil {
return err
}
p.CertificateEndDate = date
}
return nil
}
// ParseServerCertificate サーバ証明書のパース
func (p *ProxyLBCertificate) ParseServerCertificate() (*x509.Certificate, error) {
cert, e := p.parseCertificate(p.ServerCertificate)
if e != nil {
return nil, e
}
return cert, nil
}
// ParseIntermediateCertificate 中間証明書のパース
func (p *ProxyLBCertificate) ParseIntermediateCertificate() (*x509.Certificate, error) {
cert, e := p.parseCertificate(p.IntermediateCertificate)
if e != nil {
return nil, e
}
return cert, nil
}
func (p *ProxyLBCertificate) parseCertificate(certPEM string) (*x509.Certificate, error) {
block, _ := pem.Decode([]byte(certPEM))
if block != nil {
return x509.ParseCertificate(block.Bytes)
}
return nil, fmt.Errorf("can't decode certificate")
}
// ProxyLBHealth ProxyLBのヘルスチェック戻り値
type ProxyLBHealth struct {
ActiveConn int // アクティブなコネクション数
CPS int // 秒あたりコネクション数
Servers []*ProxyLBHealthServer // 実サーバのステータス
CurrentVIP string // 現在のVIP
}
// ProxyLBHealthServer ProxyLBの実サーバのステータス
type ProxyLBHealthServer struct {
ActiveConn int // アクティブなコネクション数
Status string // ステータス(UP or DOWN)
IPAddress string // IPアドレス
Port string // ポート
CPS int // 秒あたりコネクション数
}
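Editorial note (not part of the vendored diff): a rough sketch of how the new ProxyLB type might be configured, using only the helpers defined above; the name, addresses and ports are invented for illustration.

package main

import (
	"fmt"

	"github.com/sacloud/libsacloud/sacloud"
)

func main() {
	lb := sacloud.CreateNewProxyLB("example-lb")
	lb.SetPlan(sacloud.ProxyLBPlan10000)                     // 10,000 cps plan
	lb.SetHTTPHealthCheck("www.example.com", "/healthz", 10) // HTTP health check every 10s
	lb.AddBindPort("http", 80)                               // listen on HTTP/80
	lb.AddServer("192.0.2.10", 80, true)                     // one enabled real server
	fmt.Println(lb.HasProxyLBServer()) // true
}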

View file

@ -21,6 +21,7 @@ type Server struct {
propIcon // icon
propTags // tags
propCreatedAt // created at
propWaitDiskMigration // wait for disk creation on server create
}
// DNSServers サーバの所属するリージョンの推奨ネームサーバリスト
@ -43,10 +44,13 @@ func (s *Server) IPAddress() string {
// Gateway デフォルトゲートウェイアドレス
func (s *Server) Gateway() string {
if len(s.Interfaces) == 0 || s.Interfaces[0].Switch == nil || s.Interfaces[0].Switch.UserSubnet == nil {
if len(s.Interfaces) == 0 || s.Interfaces[0].Switch == nil {
return ""
}
return s.Interfaces[0].Switch.UserSubnet.DefaultRoute
if s.Interfaces[0].Switch.UserSubnet != nil {
return s.Interfaces[0].Switch.UserSubnet.DefaultRoute
}
return s.Interfaces[0].Switch.Subnet.DefaultRoute
}
// DefaultRoute デフォルトゲートウェイアドレス(Gatewayのエイリアス)
@ -56,10 +60,13 @@ func (s *Server) DefaultRoute() string {
// NetworkMaskLen サーバの1番目のNIC(eth0)のネットワークマスク長
func (s *Server) NetworkMaskLen() int {
if len(s.Interfaces) == 0 || s.Interfaces[0].Switch == nil || s.Interfaces[0].Switch.UserSubnet == nil {
if len(s.Interfaces) == 0 || s.Interfaces[0].Switch == nil {
return 0
}
return s.Interfaces[0].Switch.UserSubnet.NetworkMaskLen
if s.Interfaces[0].Switch.UserSubnet != nil {
return s.Interfaces[0].Switch.UserSubnet.NetworkMaskLen
}
return s.Interfaces[0].Switch.Subnet.NetworkMaskLen
}
// NetworkAddress サーバの1番目のNIC(eth0)のネットワークアドレス
@ -79,6 +86,119 @@ func (s *Server) CIDRIPAddress() string {
return ""
}
// UpstreamType 1番目(0番目)のNICの上流ネットワーク種別
func (s *Server) UpstreamType() EUpstreamNetworkType {
return s.UpstreamTypeAt(0)
}
// UpstreamTypeAt 指定インデックスのNICの上流ネットワーク種別
func (s *Server) UpstreamTypeAt(index int) EUpstreamNetworkType {
if len(s.Interfaces) <= index {
return EUpstreamNetworkUnknown
}
return s.Interfaces[index].UpstreamType()
}
// SwitchID 上流のスイッチのID
//
// NICがない、上流スイッチが見つからない、上流が共有セグメントの場合は-1を返す
func (s *Server) SwitchID() int64 {
return s.SwitchIDAt(0)
}
// SwitchIDAt 上流ネットワークのスイッチのID
//
// NICがない、上流スイッチが見つからない、上流が共有セグメントの場合は-1を返す
func (s *Server) SwitchIDAt(index int) int64 {
if len(s.Interfaces) <= index {
return -1
}
nic := s.Interfaces[index]
if nic.Switch == nil || nic.Switch.Scope == ESCopeShared {
return -1
}
return nic.Switch.ID
}
// SwitchName returns the name of the upstream switch of the first (index 0) NIC
//
// Returns an empty string when there is no NIC, no upstream switch is found, or the upstream is the shared segment
func (s *Server) SwitchName() string {
return s.SwitchNameAt(0)
}
// SwitchNameAt returns the name of the upstream switch of the NIC at the given index
//
// Returns an empty string when there is no NIC, no upstream switch is found, or the upstream is the shared segment
func (s *Server) SwitchNameAt(index int) string {
if len(s.Interfaces) <= index {
return ""
}
nic := s.Interfaces[index]
if nic.Switch == nil || nic.Switch.Scope == ESCopeShared {
return ""
}
return nic.Switch.Name
}
// Bandwidth returns the bandwidth of the upstream network (in Mbps)
//
// -1: the first (index 0) NIC does not exist or is disconnected
// 0 : unlimited
// otherwise: the bandwidth in Mbps
func (s *Server) Bandwidth() int {
return s.BandwidthAt(0)
}
// BandwidthAt returns the bandwidth of the upstream network (in Mbps) for the NIC at the given index
//
// -1: the index does not exist or the NIC is disconnected
// 0 : unlimited
// otherwise: the bandwidth in Mbps
func (s *Server) BandwidthAt(index int) int {
if len(s.Interfaces) <= index {
return -1
}
nic := s.Interfaces[index]
switch nic.UpstreamType() {
case EUpstreamNetworkNone:
return -1
case EUpstreamNetworkShared:
return 100
case EUpstreamNetworkSwitch, EUpstreamNetworkRouter:
//
// 上流ネットワークがスイッチだった場合の帯域制限
// https://manual.sakura.ad.jp/cloud/support/technical/network.html#support-network-03
//
// 専有ホストの場合は制限なし
if s.PrivateHost != nil {
return 0
}
// メモリに応じた制限
memory := s.GetMemoryGB()
switch {
case memory < 32:
return 1000
case 32 <= memory && memory < 128:
return 2000
case 128 <= memory && memory < 224:
return 5000
case 224 <= memory:
return 10000
default:
return -1
}
default:
return -1
}
}
const (
// ServerMaxInterfaceLen サーバーに接続できるNICの最大数
ServerMaxInterfaceLen = 10
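Editorial note (not part of the vendored diff): the new Bandwidth/BandwidthAt helpers encode the upstream-network limits described above. A minimal sketch of interpreting the return value; the zero-value Server here has no NICs, so it yields -1.

package main

import (
	"fmt"

	"github.com/sacloud/libsacloud/sacloud"
)

func main() {
	srv := &sacloud.Server{} // no NICs attached in this sketch
	switch bw := srv.Bandwidth(); bw {
	case -1:
		fmt.Println("no NIC, or the NIC is disconnected")
	case 0:
		fmt.Println("no bandwidth limit")
	default:
		fmt.Printf("upstream limited to %d Mbps\n", bw)
	}
}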

View file

@ -2,10 +2,20 @@ package sacloud
import (
"encoding/json"
"strconv"
"strings"
"time"
)
const (
// SIMOperatorsKDDI KDDI
SIMOperatorsKDDI = "KDDI"
// SIMOperatorsDOCOMO Docomo
SIMOperatorsDOCOMO = "NTT DOCOMO"
// SIMOperatorsSoftBank SoftBank
SIMOperatorsSoftBank = "SoftBank"
)
// SIM SIM(CommonServiceItem)
type SIM struct {
*Resource // ID
@ -49,8 +59,8 @@ type SIMInfo struct {
// SIMTrafficBytes 当月通信量
type SIMTrafficBytes struct {
UplinkBytes int64 `json:"uplink_bytes,omitempty"`
DownlinkBytes int64 `json:"downlink_bytes,omitempty"`
UplinkBytes uint64 `json:"uplink_bytes,omitempty"`
DownlinkBytes uint64 `json:"downlink_bytes,omitempty"`
}
// UnmarshalJSON JSONアンマーシャル(配列、オブジェクトが混在するためここで対応)
@ -60,15 +70,22 @@ func (s *SIMTrafficBytes) UnmarshalJSON(data []byte) error {
return nil
}
tmp := &struct {
UplinkBytes int64 `json:"uplink_bytes,omitempty"`
DownlinkBytes int64 `json:"downlink_bytes,omitempty"`
UplinkBytes string `json:"uplink_bytes,omitempty"`
DownlinkBytes string `json:"downlink_bytes,omitempty"`
}{}
if err := json.Unmarshal(data, &tmp); err != nil {
return err
}
s.UplinkBytes = tmp.UplinkBytes
s.DownlinkBytes = tmp.DownlinkBytes
var err error
s.UplinkBytes, err = strconv.ParseUint(tmp.UplinkBytes, 10, 64)
if err != nil {
return err
}
s.DownlinkBytes, err = strconv.ParseUint(tmp.DownlinkBytes, 10, 64)
if err != nil {
return err
}
return nil
}
@ -93,6 +110,18 @@ type SIMLog struct {
IMSI string `json:"imsi,omitempty"`
}
// SIMNetworkOperatorConfig SIM通信キャリア設定
type SIMNetworkOperatorConfig struct {
Allow bool `json:"allow,omitempty"`
CountryCode string `json:"country_code,omitempty"`
Name string `json:"name,omitempty"`
}
// SIMNetworkOperatorConfigs SIM通信キャリア設定 リクエストパラメータ
type SIMNetworkOperatorConfigs struct {
NetworkOperatorConfigs []*SIMNetworkOperatorConfig `json:"network_operator_config,omitempty"`
}
// CreateNewSIM SIM作成
func CreateNewSIM(name string, iccID string, passcode string) *SIM {
return &SIM{

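Editorial note (not part of the vendored diff): the SIMTrafficBytes change above switches the counters to uint64 and parses them from the string form now returned by the API. A minimal sketch with an invented payload:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/sacloud/libsacloud/sacloud"
)

func main() {
	data := []byte(`{"uplink_bytes":"123456789","downlink_bytes":"987654321"}`)
	var tb sacloud.SIMTrafficBytes
	if err := json.Unmarshal(data, &tb); err != nil {
		panic(err)
	}
	fmt.Println(tb.UplinkBytes, tb.DownlinkBytes) // 123456789 987654321
}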
View file

@ -1,5 +1,7 @@
package sacloud
import "time"
// SimpleMonitor シンプル監視
type SimpleMonitor struct {
*Resource // ID
@ -47,18 +49,20 @@ type SimpleMonitorProvider struct {
// SimpleMonitorHealthCheck ヘルスチェック
type SimpleMonitorHealthCheck struct {
Protocol string `json:",omitempty"` // protocol
Port string `json:",omitempty"` // port
Path string `json:",omitempty"` // request path for HTTP/HTTPS monitoring
Status string `json:",omitempty"` // expected status code for HTTP/HTTPS monitoring
SNI string `json:",omitempty"` // SNI enabled/disabled for HTTPS monitoring
Host string `json:",omitempty"` // target host (IP or FQDN)
QName string `json:",omitempty"` // query FQDN for DNS monitoring
ExpectedData string `json:",omitempty"` // expected value
Community string `json:",omitempty"` // community name for SNMP monitoring
SNMPVersion string `json:",omitempty"` // SNMP version for SNMP monitoring
OID string `json:",omitempty"` // OID for SNMP monitoring
RemainingDays int `json:",omitempty"` // remaining valid days of the SSL certificate
Protocol          string `json:",omitempty"` // protocol
Port              string `json:",omitempty"` // port
Path              string `json:",omitempty"` // request path for HTTP/HTTPS monitoring
Status            string `json:",omitempty"` // expected status code for HTTP/HTTPS monitoring
SNI               string `json:",omitempty"` // SNI enabled/disabled for HTTPS monitoring
Host              string `json:",omitempty"` // target host (IP or FQDN)
BasicAuthUsername string `json:",omitempty"` // BASIC auth user name for HTTP/HTTPS monitoring
BasicAuthPassword string `json:",omitempty"` // BASIC auth password for HTTP/HTTPS monitoring
QName             string `json:",omitempty"` // query FQDN for DNS monitoring
ExpectedData      string `json:",omitempty"` // expected value
Community         string `json:",omitempty"` // community name for SNMP monitoring
SNMPVersion       string `json:",omitempty"` // SNMP version for SNMP monitoring
OID               string `json:",omitempty"` // OID for SNMP monitoring
RemainingDays     int    `json:",omitempty"` // remaining valid days of the SSL certificate
}
// SimpleMonitorNotify シンプル監視通知
@ -68,6 +72,33 @@ type SimpleMonitorNotify struct {
IncomingWebhooksURL string `json:",omitempty"` // Slack通知の場合のWebhook URL
}
// ESimpleMonitorHealth simple monitor health status
type ESimpleMonitorHealth string
var (
// EHealthUp Up
EHealthUp = ESimpleMonitorHealth("UP")
// EHealthDown Down
EHealthDown = ESimpleMonitorHealth("DOWN")
)
// IsUp reports whether the status is UP
func (e ESimpleMonitorHealth) IsUp() bool {
return e == EHealthUp
}
// IsDown reports whether the status is DOWN
func (e ESimpleMonitorHealth) IsDown() bool {
return e == EHealthDown
}
// SimpleMonitorHealthCheckStatus simple monitor health check status
type SimpleMonitorHealthCheckStatus struct {
LastCheckedAt time.Time
LastHealthChangedAt time.Time
Health ESimpleMonitorHealth
}
// CreateNewSimpleMonitor シンプル監視作成
func CreateNewSimpleMonitor(target string) *SimpleMonitor {
return &SimpleMonitor{
@ -142,29 +173,33 @@ func (s *SimpleMonitor) SetHealthCheckTCP(port string) {
}
// SetHealthCheckHTTP configures the health check over HTTP
func (s *SimpleMonitor) SetHealthCheckHTTP(port string, path string, status string, host string) {
func (s *SimpleMonitor) SetHealthCheckHTTP(port string, path string, status string, host string, user, pass string) {
s.Settings.SimpleMonitor.HealthCheck = &SimpleMonitorHealthCheck{
Protocol: "http",
Port: port,
Path: path,
Status: status,
Host: host,
Protocol: "http",
Port: port,
Path: path,
Status: status,
Host: host,
BasicAuthUsername: user,
BasicAuthPassword: pass,
}
}
// SetHealthCheckHTTPS configures the health check over HTTPS
func (s *SimpleMonitor) SetHealthCheckHTTPS(port string, path string, status string, host string, sni bool) {
func (s *SimpleMonitor) SetHealthCheckHTTPS(port string, path string, status string, host string, sni bool, user, pass string) {
strSNI := "False"
if sni {
strSNI = "True"
}
s.Settings.SimpleMonitor.HealthCheck = &SimpleMonitorHealthCheck{
Protocol: "https",
Port: port,
Path: path,
Status: status,
Host: host,
SNI: strSNI,
Protocol: "https",
Port: port,
Path: path,
Status: status,
Host: host,
SNI: strSNI,
BasicAuthUsername: user,
BasicAuthPassword: pass,
}
}
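Editorial note (not part of the vendored diff): the two trailing parameters added to SetHealthCheckHTTP/SetHealthCheckHTTPS carry the new BASIC auth credentials. A minimal sketch, assuming CreateNewSimpleMonitor initializes the settings as in upstream libsacloud; all values are invented.

package main

import "github.com/sacloud/libsacloud/sacloud"

func main() {
	m := sacloud.CreateNewSimpleMonitor("www.example.com")
	// port, path, expected status, host header, SNI, BASIC auth user and password
	m.SetHealthCheckHTTPS("443", "/healthz", "200", "www.example.com", true, "monitor-user", "monitor-pass")
}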

View file

@ -15,6 +15,7 @@ type Switch struct {
propIcon // icon
propTags // tags
propCreatedAt // created at
propZone // zone
ServerCount int `json:",omitempty"` // number of connected servers
ApplianceCount int `json:",omitempty"` // number of connected appliances

View file

@ -236,3 +236,134 @@ func (v *VPCRouter) FindBelongsInterface(ip net.IP) (int, *VPCRouterInterface) {
}
return -1, nil
}
// IPAddress1 1番目(0番目)のNICのIPアドレス1
func (v *VPCRouter) IPAddress1() string {
return v.IPAddress1At(0)
}
// IPAddress1At 指定インデックスのNICのIPアドレス1
func (v *VPCRouter) IPAddress1At(index int) string {
if len(v.Interfaces) <= index {
return ""
}
if index == 0 {
if v.IsStandardPlan() {
return v.Interfaces[0].IPAddress
}
if !v.HasInterfaces() {
return ""
}
if len(v.Settings.Router.Interfaces[0].IPAddress) < 1 {
return ""
}
return v.Settings.Router.Interfaces[0].IPAddress[0]
}
nic := v.Settings.Router.Interfaces[index]
if len(nic.IPAddress) < 1 {
return ""
}
return nic.IPAddress[0]
}
// IPAddress2 1番目(0番目)のNICのIPアドレス2
func (v *VPCRouter) IPAddress2() string {
return v.IPAddress2At(0)
}
// IPAddress2At 指定インデックスのNICのIPアドレス2
func (v *VPCRouter) IPAddress2At(index int) string {
if v.IsStandardPlan() {
return ""
}
if len(v.Interfaces) <= index {
return ""
}
if index == 0 {
if !v.HasInterfaces() {
return ""
}
if len(v.Settings.Router.Interfaces[0].IPAddress) < 2 {
return ""
}
return v.Settings.Router.Interfaces[0].IPAddress[1]
}
nic := v.Settings.Router.Interfaces[index]
if len(nic.IPAddress) < 2 {
return ""
}
return nic.IPAddress[1]
}
// VirtualIPAddress 1番目(0番目)のNICのVIP
func (v *VPCRouter) VirtualIPAddress() string {
return v.VirtualIPAddressAt(0)
}
// VirtualIPAddressAt returns the VIP of the NIC at the given index
func (v *VPCRouter) VirtualIPAddressAt(index int) string {
if v.IsStandardPlan() {
return ""
}
if len(v.Interfaces) <= index {
return ""
}
// use the requested index rather than always the first NIC, as the doc comment states
return v.Settings.Router.Interfaces[index].VirtualIPAddress
}
// NetworkMaskLen 1番目(0番目)のNICのネットワークマスク長
func (v *VPCRouter) NetworkMaskLen() int {
return v.NetworkMaskLenAt(0)
}
// NetworkMaskLenAt 指定インデックスのNICのネットワークマスク長
func (v *VPCRouter) NetworkMaskLenAt(index int) int {
if !v.HasInterfaces() {
return -1
}
if len(v.Interfaces) <= index {
return -1
}
if index == 0 {
return v.Interfaces[0].Switch.Subnet.NetworkMaskLen
}
return v.Settings.Router.Interfaces[index].NetworkMaskLen
}
// Zone スイッチから現在のゾーン名を取得
//
// Note: 共有セグメント接続時は取得不能
func (v *VPCRouter) Zone() string {
if v.Switch != nil {
return v.Switch.GetZoneName()
}
if len(v.Interfaces) > 0 && v.Interfaces[0].Switch != nil {
return v.Interfaces[0].Switch.GetZoneName()
}
return ""
}
// VRID VRIDを取得
//
// スタンダードプラン、またはVRIDの参照に失敗した場合は-1を返す
func (v *VPCRouter) VRID() int {
if v.IsStandardPlan() {
return -1
}
if !v.HasSetting() || v.Settings.Router.VRID == nil {
return -1
}
return *v.Settings.Router.VRID
}

View file

@ -22,6 +22,7 @@ type VPCRouterSetting struct {
RemoteAccessUsers *VPCRouterRemoteAccessUsers `json:",omitempty"` // remote access user settings
SiteToSiteIPsecVPN *VPCRouterSiteToSiteIPsecVPN `json:",omitempty"` // site-to-site VPN settings
StaticRoutes *VPCRouterStaticRoutes `json:",omitempty"` // static route settings
InternetConnection *VPCRouterInternetConnection `json:",omitempty"` // internet connection
VRID *int `json:",omitempty"` // VRID
SyslogHost string `json:",omitempty"` // syslog forwarding destination host
@ -1156,3 +1157,22 @@ func (s *VPCRouterSetting) FindStaticRoute(prefix string, nextHop string) (int,
}
return -1, nil
}
// VPCRouterInternetConnection internet connection
type VPCRouterInternetConnection struct {
Enabled string `json:",omitempty"` // enabled/disabled
}
// SetInternetConnection enables or disables the internet connection
func (s *VPCRouterSetting) SetInternetConnection(enabled bool) {
if s.InternetConnection == nil {
s.InternetConnection = &VPCRouterInternetConnection{
Enabled: "True",
}
}
if enabled {
s.InternetConnection.Enabled = "True"
} else {
s.InternetConnection.Enabled = "False"
}
}
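Editorial note (not part of the vendored diff): SetInternetConnection stores the flag as the strings "True"/"False" expected by the API. A minimal sketch:

package main

import (
	"fmt"

	"github.com/sacloud/libsacloud/sacloud"
)

func main() {
	s := &sacloud.VPCRouterSetting{}
	s.SetInternetConnection(false)
	fmt.Println(s.InternetConnection.Enabled) // False
}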

View file

@ -5,4 +5,23 @@ type VPCRouterStatus struct {
FirewallReceiveLogs []string
FirewallSendLogs []string
VPNLogs []string
SessionCount int
DHCPServerLeases []struct {
IPAddress string
MACAddress string
}
L2TPIPsecServerSessions []struct {
User string
IPAddress string
TimeSec int
}
PPTPServerSessions []struct {
User string
IPAddress string
TimeSec int
}
SiteToSiteIPsecVPNPeers []struct {
Status string
Peer string
}
}

View file

@ -0,0 +1,43 @@
package mutexkv
import (
"sync"
)
// MutexKV is a simple key/value store for arbitrary mutexes. It can be used to
// serialize changes across arbitrary collaborators that share knowledge of the
// keys they must serialize on.
type MutexKV struct {
lock sync.Mutex
store map[string]*sync.Mutex
}
// Lock the mutex for the given key. Caller is responsible for calling Unlock
// for the same key
func (m *MutexKV) Lock(key string) {
m.get(key).Lock()
}
// Unlock the mutex for the given key. Caller must have called Lock for the same key first
func (m *MutexKV) Unlock(key string) {
m.get(key).Unlock()
}
// Returns a mutex for the given key, no guarantee of its lock status
func (m *MutexKV) get(key string) *sync.Mutex {
m.lock.Lock()
defer m.lock.Unlock()
mutex, ok := m.store[key]
if !ok {
mutex = &sync.Mutex{}
m.store[key] = mutex
}
return mutex
}
// NewMutexKV returns a properly initialized MutexKV
func NewMutexKV() *MutexKV {
return &MutexKV{
store: make(map[string]*sync.Mutex),
}
}
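Editorial note (not part of the vendored diff): MutexKV serializes work per key while letting unrelated keys proceed in parallel. A minimal sketch; the zone key and helper are invented.

package main

import "github.com/sacloud/libsacloud/utils/mutexkv"

var zoneMu = mutexkv.NewMutexKV()

// updateZone runs fn while holding the per-zone mutex.
func updateZone(zone string, fn func()) {
	zoneMu.Lock(zone)
	defer zoneMu.Unlock(zone)
	fn()
}

func main() {
	updateZone("is1a", func() { /* work serialized per zone */ })
}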

202
vendor/go.opencensus.io/LICENSE generated vendored Normal file
View file

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

37
vendor/go.opencensus.io/internal/internal.go generated vendored Normal file
View file

@ -0,0 +1,37 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package internal // import "go.opencensus.io/internal"
import (
"fmt"
"time"
opencensus "go.opencensus.io"
)
// UserAgent is the user agent to be added to the outgoing
// requests from the exporters.
var UserAgent = fmt.Sprintf("opencensus-go/%s", opencensus.Version())
// MonotonicEndTime returns the end time at present
// but offset from start, monotonically.
//
// The monotonic clock is used in subtractions hence
// the duration since start added back to start gives
// end as a monotonic time.
// See https://golang.org/pkg/time/#hdr-Monotonic_Clocks
func MonotonicEndTime(start time.Time) time.Time {
return start.Add(time.Now().Sub(start))
}

50
vendor/go.opencensus.io/internal/sanitize.go generated vendored Normal file
View file

@ -0,0 +1,50 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package internal
import (
"strings"
"unicode"
)
const labelKeySizeLimit = 100
// Sanitize returns a string that is truncated to 100 characters if it's too
// long, and replaces non-alphanumeric characters with underscores.
func Sanitize(s string) string {
if len(s) == 0 {
return s
}
if len(s) > labelKeySizeLimit {
s = s[:labelKeySizeLimit]
}
s = strings.Map(sanitizeRune, s)
if unicode.IsDigit(rune(s[0])) {
s = "key_" + s
}
if s[0] == '_' {
s = "key" + s
}
return s
}
// converts anything that is not a letter or digit to an underscore
func sanitizeRune(r rune) rune {
if unicode.IsLetter(r) || unicode.IsDigit(r) {
return r
}
// Everything else turns into an underscore
return '_'
}

View file

@ -0,0 +1,75 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Package tagencoding contains the tag encoding
// used internally by the stats collector.
package tagencoding // import "go.opencensus.io/internal/tagencoding"
// Values represent the encoded buffer for the values.
type Values struct {
Buffer []byte
WriteIndex int
ReadIndex int
}
func (vb *Values) growIfRequired(expected int) {
if len(vb.Buffer)-vb.WriteIndex < expected {
tmp := make([]byte, 2*(len(vb.Buffer)+1)+expected)
copy(tmp, vb.Buffer)
vb.Buffer = tmp
}
}
// WriteValue is the helper method to encode Values from map[Key][]byte.
func (vb *Values) WriteValue(v []byte) {
length := len(v) & 0xff
vb.growIfRequired(1 + length)
// writing length of v
vb.Buffer[vb.WriteIndex] = byte(length)
vb.WriteIndex++
if length == 0 {
// No value was encoded for this key
return
}
// writing v
copy(vb.Buffer[vb.WriteIndex:], v[:length])
vb.WriteIndex += length
}
// ReadValue is the helper method to decode Values to a map[Key][]byte.
func (vb *Values) ReadValue() []byte {
// read length of v
length := int(vb.Buffer[vb.ReadIndex])
vb.ReadIndex++
if length == 0 {
// No value was encoded for this key
return nil
}
// read value of v
v := make([]byte, length)
endIdx := vb.ReadIndex + length
copy(v, vb.Buffer[vb.ReadIndex:endIdx])
vb.ReadIndex = endIdx
return v
}
// Bytes returns a reference to already written bytes in the Buffer.
func (vb *Values) Bytes() []byte {
return vb.Buffer[:vb.WriteIndex]
}

53
vendor/go.opencensus.io/internal/traceinternals.go generated vendored Normal file
View file

@ -0,0 +1,53 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package internal
import (
"time"
)
// Trace allows internal access to some trace functionality.
// TODO(#412): remove this
var Trace interface{}
// LocalSpanStoreEnabled true if the local span store is enabled.
var LocalSpanStoreEnabled bool
// BucketConfiguration stores the number of samples to store for span buckets
// for successful and failed spans for a particular span name.
type BucketConfiguration struct {
Name string
MaxRequestsSucceeded int
MaxRequestsErrors int
}
// PerMethodSummary is a summary of the spans stored for a single span name.
type PerMethodSummary struct {
Active int
LatencyBuckets []LatencyBucketSummary
ErrorBuckets []ErrorBucketSummary
}
// LatencyBucketSummary is a summary of a latency bucket.
type LatencyBucketSummary struct {
MinLatency, MaxLatency time.Duration
Size int
}
// ErrorBucketSummary is a summary of an error bucket.
type ErrorBucketSummary struct {
ErrorCode int32
Size int
}

19
vendor/go.opencensus.io/metric/metricdata/doc.go generated vendored Normal file
View file

@ -0,0 +1,19 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package metricdata contains the metrics data model.
//
// This is an EXPERIMENTAL package, and may change in arbitrary ways without
// notice.
package metricdata // import "go.opencensus.io/metric/metricdata"

38
vendor/go.opencensus.io/metric/metricdata/exemplar.go generated vendored Normal file
View file

@ -0,0 +1,38 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metricdata
import (
"time"
)
// Exemplars keys.
const (
AttachmentKeySpanContext = "SpanContext"
)
// Exemplar is an example data point associated with each bucket of a
// distribution type aggregation.
//
// Their purpose is to provide an example of the kind of thing
// (request, RPC, trace span, etc.) that resulted in that measurement.
type Exemplar struct {
Value float64 // the value that was recorded
Timestamp time.Time // the time the value was recorded
Attachments Attachments // attachments (if any)
}
// Attachments is a map of extra values associated with a recorded data point.
type Attachments map[string]interface{}

35
vendor/go.opencensus.io/metric/metricdata/label.go generated vendored Normal file
View file

@ -0,0 +1,35 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metricdata
// LabelKey represents key of a label. It has optional
// description attribute.
type LabelKey struct {
Key string
Description string
}
// LabelValue represents the value of a label.
// The zero value represents a missing label value, which may be treated
// differently to an empty string value by some back ends.
type LabelValue struct {
Value string // string value of the label
Present bool // flag that indicates whether a value is present or not
}
// NewLabelValue creates a new non-nil LabelValue that represents the given string.
func NewLabelValue(val string) LabelValue {
return LabelValue{Value: val, Present: true}
}

46
vendor/go.opencensus.io/metric/metricdata/metric.go generated vendored Normal file
View file

@ -0,0 +1,46 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metricdata
import (
"time"
"go.opencensus.io/resource"
)
// Descriptor holds metadata about a metric.
type Descriptor struct {
Name string // full name of the metric
Description string // human-readable description
Unit Unit // units for the measure
Type Type // type of measure
LabelKeys []LabelKey // label keys
}
// Metric represents a quantity measured against a resource with different
// label value combinations.
type Metric struct {
Descriptor Descriptor // metric descriptor
Resource *resource.Resource // resource against which this was measured
TimeSeries []*TimeSeries // one time series for each combination of label values
}
// TimeSeries is a sequence of points associated with a combination of label
// values.
type TimeSeries struct {
LabelValues []LabelValue // label values, same order as keys in the metric descriptor
Points []Point // points sequence
StartTime time.Time // time we started recording this time series
}

193
vendor/go.opencensus.io/metric/metricdata/point.go generated vendored Normal file
View file

@ -0,0 +1,193 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metricdata
import (
"time"
)
// Point is a single data point of a time series.
type Point struct {
// Time is the point in time that this point represents in a time series.
Time time.Time
// Value is the value of this point. Prefer using ReadValue to switching on
// the value type, since new value types might be added.
Value interface{}
}
//go:generate stringer -type ValueType
// NewFloat64Point creates a new Point holding a float64 value.
func NewFloat64Point(t time.Time, val float64) Point {
return Point{
Value: val,
Time: t,
}
}
// NewInt64Point creates a new Point holding an int64 value.
func NewInt64Point(t time.Time, val int64) Point {
return Point{
Value: val,
Time: t,
}
}
// NewDistributionPoint creates a new Point holding a Distribution value.
func NewDistributionPoint(t time.Time, val *Distribution) Point {
return Point{
Value: val,
Time: t,
}
}
// NewSummaryPoint creates a new Point holding a Summary value.
func NewSummaryPoint(t time.Time, val *Summary) Point {
return Point{
Value: val,
Time: t,
}
}
// ValueVisitor allows reading the value of a point.
type ValueVisitor interface {
VisitFloat64Value(float64)
VisitInt64Value(int64)
VisitDistributionValue(*Distribution)
VisitSummaryValue(*Summary)
}
// ReadValue accepts a ValueVisitor and calls the appropriate method with the
// value of this point.
// Consumers of Point should use this in preference to switching on the type
// of the value directly, since new value types may be added.
func (p Point) ReadValue(vv ValueVisitor) {
switch v := p.Value.(type) {
case int64:
vv.VisitInt64Value(v)
case float64:
vv.VisitFloat64Value(v)
case *Distribution:
vv.VisitDistributionValue(v)
case *Summary:
vv.VisitSummaryValue(v)
default:
panic("unexpected value type")
}
}
// Distribution contains summary statistics for a population of values. It
// optionally contains a histogram representing the distribution of those
// values across a set of buckets.
type Distribution struct {
// Count is the number of values in the population. Must be non-negative. This value
// must equal the sum of the values in bucket_counts if a histogram is
// provided.
Count int64
// Sum is the sum of the values in the population. If count is zero then this field
// must be zero.
Sum float64
// SumOfSquaredDeviation is the sum of squared deviations from the mean of the values in the
// population. For values x_i this is:
//
// Sum[i=1..n]((x_i - mean)^2)
//
// Knuth, "The Art of Computer Programming", Vol. 2, page 323, 3rd edition
// describes Welford's method for accumulating this sum in one pass.
//
// If count is zero then this field must be zero.
SumOfSquaredDeviation float64
// BucketOptions describes the bounds of the histogram buckets in this
// distribution.
//
// A Distribution may optionally contain a histogram of the values in the
// population.
//
// If nil, there is no associated histogram.
BucketOptions *BucketOptions
// Buckets: if the distribution does not have a histogram, then omit this field.
// If there is a histogram, then the sum of the values in the Bucket counts
// must equal the value in the count field of the distribution.
Buckets []Bucket
}
// BucketOptions describes the bounds of the histogram buckets in this
// distribution.
type BucketOptions struct {
// Bounds specifies a set of bucket upper bounds.
// This defines len(bounds) + 1 (= N) buckets. The boundaries for bucket
// index i are:
//
// [0, Bounds[i]) for i == 0
// [Bounds[i-1], Bounds[i]) for 0 < i < N-1
// [Bounds[i-1], +infinity) for i == N-1
Bounds []float64
}
// Bucket represents a single bucket (value range) in a distribution.
type Bucket struct {
// Count is the number of values in each bucket of the histogram, as described in
// bucket_bounds.
Count int64
// Exemplar associated with this bucket (if any).
Exemplar *Exemplar
}
// Summary is a representation of percentiles.
type Summary struct {
// Count is the cumulative count (if available).
Count int64
// Sum is the cumulative sum of values (if available).
Sum float64
// HasCountAndSum is true if Count and Sum are available.
HasCountAndSum bool
// Snapshot represents percentiles calculated over an arbitrary time window.
// The values in this struct can be reset at arbitrary unknown times, with
// the requirement that all of them are reset at the same time.
Snapshot Snapshot
}
// Snapshot represents percentiles over an arbitrary time.
// The values in this struct can be reset at arbitrary unknown times, with
// the requirement that all of them are reset at the same time.
type Snapshot struct {
// Count is the number of values in the snapshot. Optional since some systems don't
// expose this. Set to 0 if not available.
Count int64
// Sum is the sum of values in the snapshot. Optional since some systems don't
// expose this. If count is 0 then this field must be zero.
Sum float64
// Percentiles is a map from percentile (range (0-100.0]) to the value of
// the percentile.
Percentiles map[float64]float64
}
//go:generate stringer -type Type
// Type is the overall type of metric, including its value type and whether it
// represents a cumulative total (since the start time) or if it represents a
// gauge value.
type Type int
// Metric types.
const (
TypeGaugeInt64 Type = iota
TypeGaugeFloat64
TypeGaugeDistribution
TypeCumulativeInt64
TypeCumulativeFloat64
TypeCumulativeDistribution
TypeSummary
)
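Editorial note (not part of the vendored diff): ReadValue dispatches on the dynamic type of Point.Value through a ValueVisitor, so callers do not switch on the type themselves. A minimal sketch with a visitor that just prints:

package main

import (
	"fmt"
	"time"

	"go.opencensus.io/metric/metricdata"
)

// printer implements metricdata.ValueVisitor.
type printer struct{}

func (printer) VisitFloat64Value(v float64)                       { fmt.Println("float64:", v) }
func (printer) VisitInt64Value(v int64)                           { fmt.Println("int64:", v) }
func (printer) VisitDistributionValue(d *metricdata.Distribution) { fmt.Println("distribution count:", d.Count) }
func (printer) VisitSummaryValue(s *metricdata.Summary)           { fmt.Println("summary count:", s.Count) }

func main() {
	p := metricdata.NewInt64Point(time.Now(), 42)
	p.ReadValue(printer{}) // prints: int64: 42
}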

View file

@ -0,0 +1,16 @@
// Code generated by "stringer -type Type"; DO NOT EDIT.
package metricdata
import "strconv"
const _Type_name = "TypeGaugeInt64TypeGaugeFloat64TypeGaugeDistributionTypeCumulativeInt64TypeCumulativeFloat64TypeCumulativeDistributionTypeSummary"
var _Type_index = [...]uint8{0, 14, 30, 51, 70, 91, 117, 128}
func (i Type) String() string {
if i < 0 || i >= Type(len(_Type_index)-1) {
return "Type(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _Type_name[_Type_index[i]:_Type_index[i+1]]
}

27
vendor/go.opencensus.io/metric/metricdata/unit.go generated vendored Normal file
View file

@ -0,0 +1,27 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metricdata
// Unit is a string encoded according to the case-sensitive abbreviations from the
// Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html
type Unit string
// Predefined units. To record against a unit not represented here, create your
// own Unit type constant from a string.
const (
UnitDimensionless Unit = "1"
UnitBytes Unit = "By"
UnitMilliseconds Unit = "ms"
)

View file

@ -0,0 +1,78 @@
// Copyright 2019, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metricproducer
import (
"sync"
)
// Manager maintains a list of active producers. Producers can register
// with the manager to allow readers to read all metrics provided by them.
// Readers can retrieve all producers registered with the manager,
// read metrics from the producers and export them.
type Manager struct {
mu sync.RWMutex
producers map[Producer]struct{}
}
var prodMgr *Manager
var once sync.Once
// GlobalManager is a single instance of producer manager
// that is used by all producers and all readers.
func GlobalManager() *Manager {
once.Do(func() {
prodMgr = &Manager{}
prodMgr.producers = make(map[Producer]struct{})
})
return prodMgr
}
// AddProducer adds the producer to the Manager if it is not already present.
func (pm *Manager) AddProducer(producer Producer) {
if producer == nil {
return
}
pm.mu.Lock()
defer pm.mu.Unlock()
pm.producers[producer] = struct{}{}
}
// DeleteProducer deletes the producer from the Manager if it is present.
func (pm *Manager) DeleteProducer(producer Producer) {
if producer == nil {
return
}
pm.mu.Lock()
defer pm.mu.Unlock()
delete(pm.producers, producer)
}
// GetAll returns a slice of all producers currently registered with
// the Manager. Each call generates a new slice. The slice
// should not be cached as registration may change at any time. It is
// typically called periodically by an exporter to read metrics from
// the producers.
func (pm *Manager) GetAll() []Producer {
pm.mu.Lock()
defer pm.mu.Unlock()
producers := make([]Producer, len(pm.producers))
i := 0
for producer := range pm.producers {
producers[i] = producer
i++
}
return producers
}


@ -0,0 +1,28 @@
// Copyright 2019, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metricproducer
import (
"go.opencensus.io/metric/metricdata"
)
// Producer is a source of metrics.
type Producer interface {
// Read should return the current values of all metrics supported by this
// metric provider.
// The returned metrics should be unique for each combination of name and
// resource.
Read() []*metricdata.Metric
}
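An illustrative sketch (not part of the vendored files) of how a producer plugs into the manager above: a toy Producer is registered with GlobalManager and later read back via GetAll, as an exporter would. The constantProducer type and its empty Read result are placeholders.
package main

import (
	"fmt"

	"go.opencensus.io/metric/metricdata"
	"go.opencensus.io/metric/metricproducer"
)

// constantProducer is a stand-in Producer; a real one would return metrics.
type constantProducer struct{}

func (constantProducer) Read() []*metricdata.Metric { return nil }

func main() {
	p := constantProducer{}
	metricproducer.GlobalManager().AddProducer(p)
	defer metricproducer.GlobalManager().DeleteProducer(p)

	// A reader/exporter would typically call GetAll periodically.
	for _, prod := range metricproducer.GlobalManager().GetAll() {
		fmt.Printf("producer %T returned %d metrics\n", prod, len(prod.Read()))
	}
}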

21
vendor/go.opencensus.io/opencensus.go generated vendored Normal file

@ -0,0 +1,21 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package opencensus contains Go support for OpenCensus.
package opencensus // import "go.opencensus.io"
// Version is the current release version of OpenCensus in use.
func Version() string {
return "0.21.0"
}

117
vendor/go.opencensus.io/plugin/ochttp/client.go generated vendored Normal file

@ -0,0 +1,117 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ochttp
import (
"net/http"
"net/http/httptrace"
"go.opencensus.io/trace"
"go.opencensus.io/trace/propagation"
)
// Transport is an http.RoundTripper that instruments all outgoing requests with
// OpenCensus stats and tracing.
//
// The zero value is intended to be a useful default, but for
// now it's recommended that you explicitly set Propagation, since the default
// for this may change.
type Transport struct {
// Base may be set to wrap another http.RoundTripper that does the actual
// requests. By default http.DefaultTransport is used.
//
// If base HTTP roundtripper implements CancelRequest,
// the returned round tripper will be cancelable.
Base http.RoundTripper
// Propagation defines how traces are propagated. If unspecified, a default
// (currently B3 format) will be used.
Propagation propagation.HTTPFormat
// StartOptions are applied to the span started by this Transport around each
// request.
//
// StartOptions.SpanKind will always be set to trace.SpanKindClient
// for spans started by this transport.
StartOptions trace.StartOptions
	// GetStartOptions allows setting start options per request. If set,
	// StartOptions is ignored.
GetStartOptions func(*http.Request) trace.StartOptions
	// FormatSpanName holds the function to use for generating the span name
// from the information found in the outgoing HTTP Request. By default the
// name equals the URL Path.
FormatSpanName func(*http.Request) string
// NewClientTrace may be set to a function allowing the current *trace.Span
// to be annotated with HTTP request event information emitted by the
// httptrace package.
NewClientTrace func(*http.Request, *trace.Span) *httptrace.ClientTrace
// TODO: Implement tag propagation for HTTP.
}
// RoundTrip implements http.RoundTripper, delegating to Base and recording stats and traces for the request.
func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
rt := t.base()
if isHealthEndpoint(req.URL.Path) {
return rt.RoundTrip(req)
}
// TODO: remove excessive nesting of http.RoundTrippers here.
format := t.Propagation
if format == nil {
format = defaultFormat
}
spanNameFormatter := t.FormatSpanName
if spanNameFormatter == nil {
spanNameFormatter = spanNameFromURL
}
startOpts := t.StartOptions
if t.GetStartOptions != nil {
startOpts = t.GetStartOptions(req)
}
rt = &traceTransport{
base: rt,
format: format,
startOptions: trace.StartOptions{
Sampler: startOpts.Sampler,
SpanKind: trace.SpanKindClient,
},
formatSpanName: spanNameFormatter,
newClientTrace: t.NewClientTrace,
}
rt = statsTransport{base: rt}
return rt.RoundTrip(req)
}
func (t *Transport) base() http.RoundTripper {
if t.Base != nil {
return t.Base
}
return http.DefaultTransport
}
// CancelRequest cancels an in-flight request by closing its connection.
func (t *Transport) CancelRequest(req *http.Request) {
type canceler interface {
CancelRequest(*http.Request)
}
if cr, ok := t.base().(canceler); ok {
cr.CancelRequest(req)
}
}
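Illustrative client usage (not part of the vendored file), assuming an application main package; the URL is a placeholder. The zero-value Transport falls back to http.DefaultTransport and B3 propagation as described above.
package main

import (
	"log"
	"net/http"

	"go.opencensus.io/plugin/ochttp"
)

func main() {
	// Wrap the default transport so every outgoing request is traced and measured.
	client := &http.Client{Transport: &ochttp.Transport{}}

	resp, err := client.Get("https://example.com/")
	if err != nil {
		log.Fatal(err)
	}
	// Reading and closing the body lets the instrumentation finish the span and stats.
	defer resp.Body.Close()
	log.Println(resp.Status)
}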

143
vendor/go.opencensus.io/plugin/ochttp/client_stats.go generated vendored Normal file

@ -0,0 +1,143 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ochttp
import (
"context"
"io"
"net/http"
"strconv"
"sync"
"time"
"go.opencensus.io/stats"
"go.opencensus.io/tag"
)
// statsTransport is an http.RoundTripper that collects stats for the outgoing requests.
type statsTransport struct {
base http.RoundTripper
}
// RoundTrip implements http.RoundTripper, delegating to Base and recording stats for the request.
func (t statsTransport) RoundTrip(req *http.Request) (*http.Response, error) {
ctx, _ := tag.New(req.Context(),
tag.Upsert(KeyClientHost, req.Host),
tag.Upsert(Host, req.Host),
tag.Upsert(KeyClientPath, req.URL.Path),
tag.Upsert(Path, req.URL.Path),
tag.Upsert(KeyClientMethod, req.Method),
tag.Upsert(Method, req.Method))
req = req.WithContext(ctx)
track := &tracker{
start: time.Now(),
ctx: ctx,
}
if req.Body == nil {
// TODO: Handle cases where ContentLength is not set.
track.reqSize = -1
} else if req.ContentLength > 0 {
track.reqSize = req.ContentLength
}
stats.Record(ctx, ClientRequestCount.M(1))
// Perform request.
resp, err := t.base.RoundTrip(req)
if err != nil {
track.statusCode = http.StatusInternalServerError
track.end()
} else {
track.statusCode = resp.StatusCode
if req.Method != "HEAD" {
track.respContentLength = resp.ContentLength
}
if resp.Body == nil {
track.end()
} else {
track.body = resp.Body
resp.Body = wrappedBody(track, resp.Body)
}
}
return resp, err
}
// CancelRequest cancels an in-flight request by closing its connection.
func (t statsTransport) CancelRequest(req *http.Request) {
type canceler interface {
CancelRequest(*http.Request)
}
if cr, ok := t.base.(canceler); ok {
cr.CancelRequest(req)
}
}
type tracker struct {
ctx context.Context
respSize int64
respContentLength int64
reqSize int64
start time.Time
body io.ReadCloser
statusCode int
endOnce sync.Once
}
var _ io.ReadCloser = (*tracker)(nil)
func (t *tracker) end() {
t.endOnce.Do(func() {
latencyMs := float64(time.Since(t.start)) / float64(time.Millisecond)
respSize := t.respSize
if t.respSize == 0 && t.respContentLength > 0 {
respSize = t.respContentLength
}
m := []stats.Measurement{
ClientSentBytes.M(t.reqSize),
ClientReceivedBytes.M(respSize),
ClientRoundtripLatency.M(latencyMs),
ClientLatency.M(latencyMs),
ClientResponseBytes.M(t.respSize),
}
if t.reqSize >= 0 {
m = append(m, ClientRequestBytes.M(t.reqSize))
}
stats.RecordWithTags(t.ctx, []tag.Mutator{
tag.Upsert(StatusCode, strconv.Itoa(t.statusCode)),
tag.Upsert(KeyClientStatus, strconv.Itoa(t.statusCode)),
}, m...)
})
}
func (t *tracker) Read(b []byte) (int, error) {
n, err := t.body.Read(b)
t.respSize += int64(n)
switch err {
case nil:
return n, nil
case io.EOF:
t.end()
}
return n, err
}
func (t *tracker) Close() error {
	// Invoking end on Close helps catch the cases in which a read
	// returned a non-nil error and the measurements were never
	// recorded because the body was not read to EOF.
t.end()
return t.body.Close()
}

19
vendor/go.opencensus.io/plugin/ochttp/doc.go generated vendored Normal file

@ -0,0 +1,19 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package ochttp provides OpenCensus instrumentation for the net/http package.
//
// For server instrumentation, see Handler. For client-side instrumentation,
// see Transport.
package ochttp // import "go.opencensus.io/plugin/ochttp"


@ -0,0 +1,123 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package b3 contains a propagation.HTTPFormat implementation
// for B3 propagation. See https://github.com/openzipkin/b3-propagation
// for more details.
package b3 // import "go.opencensus.io/plugin/ochttp/propagation/b3"
import (
"encoding/hex"
"net/http"
"go.opencensus.io/trace"
"go.opencensus.io/trace/propagation"
)
// B3 headers that OpenCensus understands.
const (
TraceIDHeader = "X-B3-TraceId"
SpanIDHeader = "X-B3-SpanId"
SampledHeader = "X-B3-Sampled"
)
// HTTPFormat implements propagation.HTTPFormat to propagate
// traces in HTTP headers in B3 propagation format.
// HTTPFormat skips the X-B3-ParentId and X-B3-Flags headers
// because there are additional fields not represented in the
// OpenCensus span context. Spans created from the incoming
// header will be the direct children of the client-side span.
// Similarly, the receiver of outgoing spans should use the client-side
// span created by OpenCensus as the parent.
type HTTPFormat struct{}
var _ propagation.HTTPFormat = (*HTTPFormat)(nil)
// SpanContextFromRequest extracts a B3 span context from incoming requests.
func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) {
tid, ok := ParseTraceID(req.Header.Get(TraceIDHeader))
if !ok {
return trace.SpanContext{}, false
}
sid, ok := ParseSpanID(req.Header.Get(SpanIDHeader))
if !ok {
return trace.SpanContext{}, false
}
sampled, _ := ParseSampled(req.Header.Get(SampledHeader))
return trace.SpanContext{
TraceID: tid,
SpanID: sid,
TraceOptions: sampled,
}, true
}
// ParseTraceID parses the value of the X-B3-TraceId header.
func ParseTraceID(tid string) (trace.TraceID, bool) {
if tid == "" {
return trace.TraceID{}, false
}
b, err := hex.DecodeString(tid)
if err != nil {
return trace.TraceID{}, false
}
var traceID trace.TraceID
if len(b) <= 8 {
// The lower 64-bits.
start := 8 + (8 - len(b))
copy(traceID[start:], b)
} else {
start := 16 - len(b)
copy(traceID[start:], b)
}
return traceID, true
}
// ParseSpanID parses the value of the X-B3-SpanId or X-B3-ParentSpanId headers.
func ParseSpanID(sid string) (spanID trace.SpanID, ok bool) {
if sid == "" {
return trace.SpanID{}, false
}
b, err := hex.DecodeString(sid)
if err != nil {
return trace.SpanID{}, false
}
start := 8 - len(b)
copy(spanID[start:], b)
return spanID, true
}
// ParseSampled parses the value of the X-B3-Sampled header.
func ParseSampled(sampled string) (trace.TraceOptions, bool) {
switch sampled {
case "true", "1":
return trace.TraceOptions(1), true
default:
return trace.TraceOptions(0), false
}
}
// SpanContextToRequest modifies the given request to include B3 headers.
func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) {
req.Header.Set(TraceIDHeader, hex.EncodeToString(sc.TraceID[:]))
req.Header.Set(SpanIDHeader, hex.EncodeToString(sc.SpanID[:]))
var sampled string
if sc.IsSampled() {
sampled = "1"
} else {
sampled = "0"
}
req.Header.Set(SampledHeader, sampled)
}
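A hedged round-trip sketch (not part of the vendored file): inject a span context into request headers with SpanContextToRequest, then read it back with SpanContextFromRequest. The span name and URL are placeholders.
package main

import (
	"context"
	"fmt"
	"net/http"

	"go.opencensus.io/plugin/ochttp/propagation/b3"
	"go.opencensus.io/trace"
)

func main() {
	_, span := trace.StartSpan(context.Background(), "example")
	defer span.End()

	req, _ := http.NewRequest(http.MethodGet, "https://example.com/", nil)

	f := &b3.HTTPFormat{}
	f.SpanContextToRequest(span.SpanContext(), req)

	// On the receiving side the same format extracts the context again.
	if sc, ok := f.SpanContextFromRequest(req); ok {
		fmt.Println(sc.TraceID, sc.SpanID)
	}
}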

61
vendor/go.opencensus.io/plugin/ochttp/route.go generated vendored Normal file

@ -0,0 +1,61 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ochttp
import (
"context"
"net/http"
"go.opencensus.io/tag"
)
// SetRoute sets the http_server_route tag to the given value.
// It's useful when an HTTP framework does not support the http.Handler interface
// and using WithRouteTag is not an option, but the framework still provides a way
// to hook into the request flow.
func SetRoute(ctx context.Context, route string) {
if a, ok := ctx.Value(addedTagsKey{}).(*addedTags); ok {
a.t = append(a.t, tag.Upsert(KeyServerRoute, route))
}
}
// WithRouteTag returns an http.Handler that records stats with the
// http_server_route tag set to the given value.
func WithRouteTag(handler http.Handler, route string) http.Handler {
return taggedHandlerFunc(func(w http.ResponseWriter, r *http.Request) []tag.Mutator {
addRoute := []tag.Mutator{tag.Upsert(KeyServerRoute, route)}
ctx, _ := tag.New(r.Context(), addRoute...)
r = r.WithContext(ctx)
handler.ServeHTTP(w, r)
return addRoute
})
}
// taggedHandlerFunc is a http.Handler that returns tags describing the
// processing of the request. These tags will be recorded along with the
// measures in this package at the end of the request.
type taggedHandlerFunc func(w http.ResponseWriter, r *http.Request) []tag.Mutator
func (h taggedHandlerFunc) ServeHTTP(w http.ResponseWriter, r *http.Request) {
tags := h(w, r)
if a, ok := r.Context().Value(addedTagsKey{}).(*addedTags); ok {
a.t = append(a.t, tags...)
}
}
type addedTagsKey struct{}
type addedTags struct {
t []tag.Mutator
}
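Illustrative usage (not part of the vendored file): WithRouteTag attaches a low-cardinality route to the stats recorded by the ochttp.Handler defined in server.go below. The mux pattern, route string, and port are placeholders.
package main

import (
	"io"
	"log"
	"net/http"

	"go.opencensus.io/plugin/ochttp"
)

func main() {
	mux := http.NewServeMux()
	userHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		io.WriteString(w, "ok")
	})
	// Record stats for this subtree under the logical route "/users/:id".
	mux.Handle("/users/", ochttp.WithRouteTag(userHandler, "/users/:id"))

	log.Fatal(http.ListenAndServe(":8080", &ochttp.Handler{Handler: mux}))
}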

449
vendor/go.opencensus.io/plugin/ochttp/server.go generated vendored Normal file

@ -0,0 +1,449 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ochttp
import (
"context"
"io"
"net/http"
"strconv"
"sync"
"time"
"go.opencensus.io/stats"
"go.opencensus.io/tag"
"go.opencensus.io/trace"
"go.opencensus.io/trace/propagation"
)
// Handler is an http.Handler wrapper to instrument your HTTP server with
// OpenCensus. It supports both stats and tracing.
//
// Tracing
//
// This handler is aware of the incoming request's span, reading it from request
// headers as configured using the Propagation field.
// The extracted span can be accessed from the incoming request's
// context.
//
// span := trace.FromContext(r.Context())
//
// The server span will be automatically ended at the end of ServeHTTP.
type Handler struct {
// Propagation defines how traces are propagated. If unspecified,
// B3 propagation will be used.
Propagation propagation.HTTPFormat
// Handler is the handler used to handle the incoming request.
Handler http.Handler
// StartOptions are applied to the span started by this Handler around each
// request.
//
// StartOptions.SpanKind will always be set to trace.SpanKindServer
// for spans started by this transport.
StartOptions trace.StartOptions
	// GetStartOptions allows setting start options per request. If set,
	// StartOptions is ignored.
GetStartOptions func(*http.Request) trace.StartOptions
// IsPublicEndpoint should be set to true for publicly accessible HTTP(S)
// servers. If true, any trace metadata set on the incoming request will
// be added as a linked trace instead of being added as a parent of the
// current trace.
IsPublicEndpoint bool
// FormatSpanName holds the function to use for generating the span name
// from the information found in the incoming HTTP Request. By default the
// name equals the URL Path.
FormatSpanName func(*http.Request) string
}
func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
var tags addedTags
r, traceEnd := h.startTrace(w, r)
defer traceEnd()
w, statsEnd := h.startStats(w, r)
defer statsEnd(&tags)
handler := h.Handler
if handler == nil {
handler = http.DefaultServeMux
}
r = r.WithContext(context.WithValue(r.Context(), addedTagsKey{}, &tags))
handler.ServeHTTP(w, r)
}
func (h *Handler) startTrace(w http.ResponseWriter, r *http.Request) (*http.Request, func()) {
if isHealthEndpoint(r.URL.Path) {
return r, func() {}
}
var name string
if h.FormatSpanName == nil {
name = spanNameFromURL(r)
} else {
name = h.FormatSpanName(r)
}
ctx := r.Context()
startOpts := h.StartOptions
if h.GetStartOptions != nil {
startOpts = h.GetStartOptions(r)
}
var span *trace.Span
sc, ok := h.extractSpanContext(r)
if ok && !h.IsPublicEndpoint {
ctx, span = trace.StartSpanWithRemoteParent(ctx, name, sc,
trace.WithSampler(startOpts.Sampler),
trace.WithSpanKind(trace.SpanKindServer))
} else {
ctx, span = trace.StartSpan(ctx, name,
trace.WithSampler(startOpts.Sampler),
trace.WithSpanKind(trace.SpanKindServer),
)
if ok {
span.AddLink(trace.Link{
TraceID: sc.TraceID,
SpanID: sc.SpanID,
Type: trace.LinkTypeParent,
Attributes: nil,
})
}
}
span.AddAttributes(requestAttrs(r)...)
if r.Body == nil {
// TODO: Handle cases where ContentLength is not set.
} else if r.ContentLength > 0 {
span.AddMessageReceiveEvent(0, /* TODO: messageID */
int64(r.ContentLength), -1)
}
return r.WithContext(ctx), span.End
}
func (h *Handler) extractSpanContext(r *http.Request) (trace.SpanContext, bool) {
if h.Propagation == nil {
return defaultFormat.SpanContextFromRequest(r)
}
return h.Propagation.SpanContextFromRequest(r)
}
func (h *Handler) startStats(w http.ResponseWriter, r *http.Request) (http.ResponseWriter, func(tags *addedTags)) {
ctx, _ := tag.New(r.Context(),
tag.Upsert(Host, r.Host),
tag.Upsert(Path, r.URL.Path),
tag.Upsert(Method, r.Method))
track := &trackingResponseWriter{
start: time.Now(),
ctx: ctx,
writer: w,
}
if r.Body == nil {
// TODO: Handle cases where ContentLength is not set.
track.reqSize = -1
} else if r.ContentLength > 0 {
track.reqSize = r.ContentLength
}
stats.Record(ctx, ServerRequestCount.M(1))
return track.wrappedResponseWriter(), track.end
}
type trackingResponseWriter struct {
ctx context.Context
reqSize int64
respSize int64
start time.Time
statusCode int
statusLine string
endOnce sync.Once
writer http.ResponseWriter
}
// Compile time assertion for ResponseWriter interface
var _ http.ResponseWriter = (*trackingResponseWriter)(nil)
var logTagsErrorOnce sync.Once
func (t *trackingResponseWriter) end(tags *addedTags) {
t.endOnce.Do(func() {
if t.statusCode == 0 {
t.statusCode = 200
}
span := trace.FromContext(t.ctx)
span.SetStatus(TraceStatus(t.statusCode, t.statusLine))
span.AddAttributes(trace.Int64Attribute(StatusCodeAttribute, int64(t.statusCode)))
m := []stats.Measurement{
ServerLatency.M(float64(time.Since(t.start)) / float64(time.Millisecond)),
ServerResponseBytes.M(t.respSize),
}
if t.reqSize >= 0 {
m = append(m, ServerRequestBytes.M(t.reqSize))
}
allTags := make([]tag.Mutator, len(tags.t)+1)
allTags[0] = tag.Upsert(StatusCode, strconv.Itoa(t.statusCode))
copy(allTags[1:], tags.t)
stats.RecordWithTags(t.ctx, allTags, m...)
})
}
func (t *trackingResponseWriter) Header() http.Header {
return t.writer.Header()
}
func (t *trackingResponseWriter) Write(data []byte) (int, error) {
n, err := t.writer.Write(data)
t.respSize += int64(n)
// Add message event for request bytes sent.
span := trace.FromContext(t.ctx)
span.AddMessageSendEvent(0 /* TODO: messageID */, int64(n), -1)
return n, err
}
func (t *trackingResponseWriter) WriteHeader(statusCode int) {
t.writer.WriteHeader(statusCode)
t.statusCode = statusCode
t.statusLine = http.StatusText(t.statusCode)
}
// wrappedResponseWriter returns a wrapped version of the original
// ResponseWriter and only implements the same combination of additional
// interfaces as the original.
// This implementation is based on https://github.com/felixge/httpsnoop.
func (t *trackingResponseWriter) wrappedResponseWriter() http.ResponseWriter {
var (
hj, i0 = t.writer.(http.Hijacker)
cn, i1 = t.writer.(http.CloseNotifier)
pu, i2 = t.writer.(http.Pusher)
fl, i3 = t.writer.(http.Flusher)
rf, i4 = t.writer.(io.ReaderFrom)
)
switch {
case !i0 && !i1 && !i2 && !i3 && !i4:
return struct {
http.ResponseWriter
}{t}
case !i0 && !i1 && !i2 && !i3 && i4:
return struct {
http.ResponseWriter
io.ReaderFrom
}{t, rf}
case !i0 && !i1 && !i2 && i3 && !i4:
return struct {
http.ResponseWriter
http.Flusher
}{t, fl}
case !i0 && !i1 && !i2 && i3 && i4:
return struct {
http.ResponseWriter
http.Flusher
io.ReaderFrom
}{t, fl, rf}
case !i0 && !i1 && i2 && !i3 && !i4:
return struct {
http.ResponseWriter
http.Pusher
}{t, pu}
case !i0 && !i1 && i2 && !i3 && i4:
return struct {
http.ResponseWriter
http.Pusher
io.ReaderFrom
}{t, pu, rf}
case !i0 && !i1 && i2 && i3 && !i4:
return struct {
http.ResponseWriter
http.Pusher
http.Flusher
}{t, pu, fl}
case !i0 && !i1 && i2 && i3 && i4:
return struct {
http.ResponseWriter
http.Pusher
http.Flusher
io.ReaderFrom
}{t, pu, fl, rf}
case !i0 && i1 && !i2 && !i3 && !i4:
return struct {
http.ResponseWriter
http.CloseNotifier
}{t, cn}
case !i0 && i1 && !i2 && !i3 && i4:
return struct {
http.ResponseWriter
http.CloseNotifier
io.ReaderFrom
}{t, cn, rf}
case !i0 && i1 && !i2 && i3 && !i4:
return struct {
http.ResponseWriter
http.CloseNotifier
http.Flusher
}{t, cn, fl}
case !i0 && i1 && !i2 && i3 && i4:
return struct {
http.ResponseWriter
http.CloseNotifier
http.Flusher
io.ReaderFrom
}{t, cn, fl, rf}
case !i0 && i1 && i2 && !i3 && !i4:
return struct {
http.ResponseWriter
http.CloseNotifier
http.Pusher
}{t, cn, pu}
case !i0 && i1 && i2 && !i3 && i4:
return struct {
http.ResponseWriter
http.CloseNotifier
http.Pusher
io.ReaderFrom
}{t, cn, pu, rf}
case !i0 && i1 && i2 && i3 && !i4:
return struct {
http.ResponseWriter
http.CloseNotifier
http.Pusher
http.Flusher
}{t, cn, pu, fl}
case !i0 && i1 && i2 && i3 && i4:
return struct {
http.ResponseWriter
http.CloseNotifier
http.Pusher
http.Flusher
io.ReaderFrom
}{t, cn, pu, fl, rf}
case i0 && !i1 && !i2 && !i3 && !i4:
return struct {
http.ResponseWriter
http.Hijacker
}{t, hj}
case i0 && !i1 && !i2 && !i3 && i4:
return struct {
http.ResponseWriter
http.Hijacker
io.ReaderFrom
}{t, hj, rf}
case i0 && !i1 && !i2 && i3 && !i4:
return struct {
http.ResponseWriter
http.Hijacker
http.Flusher
}{t, hj, fl}
case i0 && !i1 && !i2 && i3 && i4:
return struct {
http.ResponseWriter
http.Hijacker
http.Flusher
io.ReaderFrom
}{t, hj, fl, rf}
case i0 && !i1 && i2 && !i3 && !i4:
return struct {
http.ResponseWriter
http.Hijacker
http.Pusher
}{t, hj, pu}
case i0 && !i1 && i2 && !i3 && i4:
return struct {
http.ResponseWriter
http.Hijacker
http.Pusher
io.ReaderFrom
}{t, hj, pu, rf}
case i0 && !i1 && i2 && i3 && !i4:
return struct {
http.ResponseWriter
http.Hijacker
http.Pusher
http.Flusher
}{t, hj, pu, fl}
case i0 && !i1 && i2 && i3 && i4:
return struct {
http.ResponseWriter
http.Hijacker
http.Pusher
http.Flusher
io.ReaderFrom
}{t, hj, pu, fl, rf}
case i0 && i1 && !i2 && !i3 && !i4:
return struct {
http.ResponseWriter
http.Hijacker
http.CloseNotifier
}{t, hj, cn}
case i0 && i1 && !i2 && !i3 && i4:
return struct {
http.ResponseWriter
http.Hijacker
http.CloseNotifier
io.ReaderFrom
}{t, hj, cn, rf}
case i0 && i1 && !i2 && i3 && !i4:
return struct {
http.ResponseWriter
http.Hijacker
http.CloseNotifier
http.Flusher
}{t, hj, cn, fl}
case i0 && i1 && !i2 && i3 && i4:
return struct {
http.ResponseWriter
http.Hijacker
http.CloseNotifier
http.Flusher
io.ReaderFrom
}{t, hj, cn, fl, rf}
case i0 && i1 && i2 && !i3 && !i4:
return struct {
http.ResponseWriter
http.Hijacker
http.CloseNotifier
http.Pusher
}{t, hj, cn, pu}
case i0 && i1 && i2 && !i3 && i4:
return struct {
http.ResponseWriter
http.Hijacker
http.CloseNotifier
http.Pusher
io.ReaderFrom
}{t, hj, cn, pu, rf}
case i0 && i1 && i2 && i3 && !i4:
return struct {
http.ResponseWriter
http.Hijacker
http.CloseNotifier
http.Pusher
http.Flusher
}{t, hj, cn, pu, fl}
case i0 && i1 && i2 && i3 && i4:
return struct {
http.ResponseWriter
http.Hijacker
http.CloseNotifier
http.Pusher
http.Flusher
io.ReaderFrom
}{t, hj, cn, pu, fl, rf}
default:
return struct {
http.ResponseWriter
}{t}
}
}
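A minimal server sketch (not part of the vendored file): wrap an application handler in ochttp.Handler so each request gets a server span and the measures above. The handler body, annotation text, and port are placeholders.
package main

import (
	"io"
	"log"
	"net/http"

	"go.opencensus.io/plugin/ochttp"
	"go.opencensus.io/trace"
)

func main() {
	h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// The server span started by ochttp.Handler is available on the context.
		span := trace.FromContext(r.Context())
		span.Annotate(nil, "handling request")
		io.WriteString(w, "hello")
	})

	log.Fatal(http.ListenAndServe(":8080", &ochttp.Handler{Handler: h}))
}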


@ -0,0 +1,169 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ochttp
import (
"crypto/tls"
"net/http"
"net/http/httptrace"
"strings"
"go.opencensus.io/trace"
)
type spanAnnotator struct {
sp *trace.Span
}
// TODO: Remove NewSpanAnnotator at the next release.
// NewSpanAnnotator returns a httptrace.ClientTrace which annotates
// all emitted httptrace events on the provided Span.
// Deprecated: Use NewSpanAnnotatingClientTrace instead
func NewSpanAnnotator(r *http.Request, s *trace.Span) *httptrace.ClientTrace {
return NewSpanAnnotatingClientTrace(r, s)
}
// NewSpanAnnotatingClientTrace returns a httptrace.ClientTrace which annotates
// all emitted httptrace events on the provided Span.
func NewSpanAnnotatingClientTrace(_ *http.Request, s *trace.Span) *httptrace.ClientTrace {
sa := spanAnnotator{sp: s}
return &httptrace.ClientTrace{
GetConn: sa.getConn,
GotConn: sa.gotConn,
PutIdleConn: sa.putIdleConn,
GotFirstResponseByte: sa.gotFirstResponseByte,
Got100Continue: sa.got100Continue,
DNSStart: sa.dnsStart,
DNSDone: sa.dnsDone,
ConnectStart: sa.connectStart,
ConnectDone: sa.connectDone,
TLSHandshakeStart: sa.tlsHandshakeStart,
TLSHandshakeDone: sa.tlsHandshakeDone,
WroteHeaders: sa.wroteHeaders,
Wait100Continue: sa.wait100Continue,
WroteRequest: sa.wroteRequest,
}
}
func (s spanAnnotator) getConn(hostPort string) {
attrs := []trace.Attribute{
trace.StringAttribute("httptrace.get_connection.host_port", hostPort),
}
s.sp.Annotate(attrs, "GetConn")
}
func (s spanAnnotator) gotConn(info httptrace.GotConnInfo) {
attrs := []trace.Attribute{
trace.BoolAttribute("httptrace.got_connection.reused", info.Reused),
trace.BoolAttribute("httptrace.got_connection.was_idle", info.WasIdle),
}
if info.WasIdle {
attrs = append(attrs,
trace.StringAttribute("httptrace.got_connection.idle_time", info.IdleTime.String()))
}
s.sp.Annotate(attrs, "GotConn")
}
// PutIdleConn implements a httptrace.ClientTrace hook
func (s spanAnnotator) putIdleConn(err error) {
var attrs []trace.Attribute
if err != nil {
attrs = append(attrs,
trace.StringAttribute("httptrace.put_idle_connection.error", err.Error()))
}
s.sp.Annotate(attrs, "PutIdleConn")
}
func (s spanAnnotator) gotFirstResponseByte() {
s.sp.Annotate(nil, "GotFirstResponseByte")
}
func (s spanAnnotator) got100Continue() {
s.sp.Annotate(nil, "Got100Continue")
}
func (s spanAnnotator) dnsStart(info httptrace.DNSStartInfo) {
attrs := []trace.Attribute{
trace.StringAttribute("httptrace.dns_start.host", info.Host),
}
s.sp.Annotate(attrs, "DNSStart")
}
func (s spanAnnotator) dnsDone(info httptrace.DNSDoneInfo) {
var addrs []string
for _, addr := range info.Addrs {
addrs = append(addrs, addr.String())
}
attrs := []trace.Attribute{
trace.StringAttribute("httptrace.dns_done.addrs", strings.Join(addrs, " , ")),
}
if info.Err != nil {
attrs = append(attrs,
trace.StringAttribute("httptrace.dns_done.error", info.Err.Error()))
}
s.sp.Annotate(attrs, "DNSDone")
}
func (s spanAnnotator) connectStart(network, addr string) {
attrs := []trace.Attribute{
trace.StringAttribute("httptrace.connect_start.network", network),
trace.StringAttribute("httptrace.connect_start.addr", addr),
}
s.sp.Annotate(attrs, "ConnectStart")
}
func (s spanAnnotator) connectDone(network, addr string, err error) {
attrs := []trace.Attribute{
trace.StringAttribute("httptrace.connect_done.network", network),
trace.StringAttribute("httptrace.connect_done.addr", addr),
}
if err != nil {
attrs = append(attrs,
trace.StringAttribute("httptrace.connect_done.error", err.Error()))
}
s.sp.Annotate(attrs, "ConnectDone")
}
func (s spanAnnotator) tlsHandshakeStart() {
s.sp.Annotate(nil, "TLSHandshakeStart")
}
func (s spanAnnotator) tlsHandshakeDone(_ tls.ConnectionState, err error) {
var attrs []trace.Attribute
if err != nil {
attrs = append(attrs,
trace.StringAttribute("httptrace.tls_handshake_done.error", err.Error()))
}
s.sp.Annotate(attrs, "TLSHandshakeDone")
}
func (s spanAnnotator) wroteHeaders() {
s.sp.Annotate(nil, "WroteHeaders")
}
func (s spanAnnotator) wait100Continue() {
s.sp.Annotate(nil, "Wait100Continue")
}
func (s spanAnnotator) wroteRequest(info httptrace.WroteRequestInfo) {
var attrs []trace.Attribute
if info.Err != nil {
attrs = append(attrs,
trace.StringAttribute("httptrace.wrote_request.error", info.Err.Error()))
}
s.sp.Annotate(attrs, "WroteRequest")
}
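Illustrative wiring (not part of the vendored file): point Transport.NewClientTrace at NewSpanAnnotatingClientTrace so each outgoing request's span is annotated with the httptrace events above. The URL is a placeholder.
package main

import (
	"log"
	"net/http"

	"go.opencensus.io/plugin/ochttp"
)

func main() {
	client := &http.Client{
		Transport: &ochttp.Transport{
			NewClientTrace: ochttp.NewSpanAnnotatingClientTrace,
		},
	}

	resp, err := client.Get("https://example.com/")
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()
	log.Println(resp.Status)
}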

292
vendor/go.opencensus.io/plugin/ochttp/stats.go generated vendored Normal file

@ -0,0 +1,292 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ochttp
import (
"go.opencensus.io/stats"
"go.opencensus.io/stats/view"
"go.opencensus.io/tag"
)
// Deprecated: client HTTP measures.
var (
// Deprecated: Use a Count aggregation over one of the other client measures to achieve the same effect.
ClientRequestCount = stats.Int64(
"opencensus.io/http/client/request_count",
"Number of HTTP requests started",
stats.UnitDimensionless)
// Deprecated: Use ClientSentBytes.
ClientRequestBytes = stats.Int64(
"opencensus.io/http/client/request_bytes",
"HTTP request body size if set as ContentLength (uncompressed)",
stats.UnitBytes)
// Deprecated: Use ClientReceivedBytes.
ClientResponseBytes = stats.Int64(
"opencensus.io/http/client/response_bytes",
"HTTP response body size (uncompressed)",
stats.UnitBytes)
// Deprecated: Use ClientRoundtripLatency.
ClientLatency = stats.Float64(
"opencensus.io/http/client/latency",
"End-to-end latency",
stats.UnitMilliseconds)
)
// The following client HTTP measures are supported for use in custom views.
var (
ClientSentBytes = stats.Int64(
"opencensus.io/http/client/sent_bytes",
"Total bytes sent in request body (not including headers)",
stats.UnitBytes,
)
ClientReceivedBytes = stats.Int64(
"opencensus.io/http/client/received_bytes",
"Total bytes received in response bodies (not including headers but including error responses with bodies)",
stats.UnitBytes,
)
ClientRoundtripLatency = stats.Float64(
"opencensus.io/http/client/roundtrip_latency",
"Time between first byte of request headers sent to last byte of response received, or terminal error",
stats.UnitMilliseconds,
)
)
// The following server HTTP measures are supported for use in custom views:
var (
ServerRequestCount = stats.Int64(
"opencensus.io/http/server/request_count",
"Number of HTTP requests started",
stats.UnitDimensionless)
ServerRequestBytes = stats.Int64(
"opencensus.io/http/server/request_bytes",
"HTTP request body size if set as ContentLength (uncompressed)",
stats.UnitBytes)
ServerResponseBytes = stats.Int64(
"opencensus.io/http/server/response_bytes",
"HTTP response body size (uncompressed)",
stats.UnitBytes)
ServerLatency = stats.Float64(
"opencensus.io/http/server/latency",
"End-to-end latency",
stats.UnitMilliseconds)
)
// The following tags are applied to stats recorded by this package. Host, Path
// and Method are applied to all measures. StatusCode is not applied to
// ClientRequestCount or ServerRequestCount, since it is recorded before the status is known.
var (
// Host is the value of the HTTP Host header.
//
// The value of this tag can be controlled by the HTTP client, so you need
// to watch out for potentially generating high-cardinality labels in your
// metrics backend if you use this tag in views.
Host, _ = tag.NewKey("http.host")
// StatusCode is the numeric HTTP response status code,
// or "error" if a transport error occurred and no status code was read.
StatusCode, _ = tag.NewKey("http.status")
// Path is the URL path (not including query string) in the request.
//
// The value of this tag can be controlled by the HTTP client, so you need
// to watch out for potentially generating high-cardinality labels in your
// metrics backend if you use this tag in views.
Path, _ = tag.NewKey("http.path")
// Method is the HTTP method of the request, capitalized (GET, POST, etc.).
Method, _ = tag.NewKey("http.method")
// KeyServerRoute is a low cardinality string representing the logical
	// handler of the request. This is usually the pattern registered on a
// ServeMux (or similar string).
KeyServerRoute, _ = tag.NewKey("http_server_route")
)
// Client tag keys.
var (
// KeyClientMethod is the HTTP method, capitalized (i.e. GET, POST, PUT, DELETE, etc.).
KeyClientMethod, _ = tag.NewKey("http_client_method")
// KeyClientPath is the URL path (not including query string).
KeyClientPath, _ = tag.NewKey("http_client_path")
// KeyClientStatus is the HTTP status code as an integer (e.g. 200, 404, 500.), or "error" if no response status line was received.
KeyClientStatus, _ = tag.NewKey("http_client_status")
// KeyClientHost is the value of the request Host header.
KeyClientHost, _ = tag.NewKey("http_client_host")
)
// Default distributions used by views in this package.
var (
DefaultSizeDistribution = view.Distribution(1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296)
DefaultLatencyDistribution = view.Distribution(1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000)
)
// The following are some convenience views for client measures.
// You still need to register these views for data to actually be collected.
var (
ClientSentBytesDistribution = &view.View{
Name: "opencensus.io/http/client/sent_bytes",
Measure: ClientSentBytes,
Aggregation: DefaultSizeDistribution,
Description: "Total bytes sent in request body (not including headers), by HTTP method and response status",
TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus},
}
ClientReceivedBytesDistribution = &view.View{
Name: "opencensus.io/http/client/received_bytes",
Measure: ClientReceivedBytes,
Aggregation: DefaultSizeDistribution,
Description: "Total bytes received in response bodies (not including headers but including error responses with bodies), by HTTP method and response status",
TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus},
}
ClientRoundtripLatencyDistribution = &view.View{
Name: "opencensus.io/http/client/roundtrip_latency",
Measure: ClientRoundtripLatency,
Aggregation: DefaultLatencyDistribution,
Description: "End-to-end latency, by HTTP method and response status",
TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus},
}
ClientCompletedCount = &view.View{
Name: "opencensus.io/http/client/completed_count",
Measure: ClientRoundtripLatency,
Aggregation: view.Count(),
Description: "Count of completed requests, by HTTP method and response status",
TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus},
}
)
// Deprecated: Old client Views.
var (
// Deprecated: No direct replacement, but see ClientCompletedCount.
ClientRequestCountView = &view.View{
Name: "opencensus.io/http/client/request_count",
Description: "Count of HTTP requests started",
Measure: ClientRequestCount,
Aggregation: view.Count(),
}
// Deprecated: Use ClientSentBytesDistribution.
ClientRequestBytesView = &view.View{
Name: "opencensus.io/http/client/request_bytes",
Description: "Size distribution of HTTP request body",
Measure: ClientSentBytes,
Aggregation: DefaultSizeDistribution,
}
// Deprecated: Use ClientReceivedBytesDistribution instead.
ClientResponseBytesView = &view.View{
Name: "opencensus.io/http/client/response_bytes",
Description: "Size distribution of HTTP response body",
Measure: ClientReceivedBytes,
Aggregation: DefaultSizeDistribution,
}
// Deprecated: Use ClientRoundtripLatencyDistribution instead.
ClientLatencyView = &view.View{
Name: "opencensus.io/http/client/latency",
Description: "Latency distribution of HTTP requests",
Measure: ClientRoundtripLatency,
Aggregation: DefaultLatencyDistribution,
}
// Deprecated: Use ClientCompletedCount instead.
ClientRequestCountByMethod = &view.View{
Name: "opencensus.io/http/client/request_count_by_method",
Description: "Client request count by HTTP method",
TagKeys: []tag.Key{Method},
Measure: ClientSentBytes,
Aggregation: view.Count(),
}
// Deprecated: Use ClientCompletedCount instead.
ClientResponseCountByStatusCode = &view.View{
Name: "opencensus.io/http/client/response_count_by_status_code",
Description: "Client response count by status code",
TagKeys: []tag.Key{StatusCode},
Measure: ClientRoundtripLatency,
Aggregation: view.Count(),
}
)
// The following are some convenience views for server measures.
// You still need to register these views for data to actually be collected.
var (
ServerRequestCountView = &view.View{
Name: "opencensus.io/http/server/request_count",
Description: "Count of HTTP requests started",
Measure: ServerRequestCount,
Aggregation: view.Count(),
}
ServerRequestBytesView = &view.View{
Name: "opencensus.io/http/server/request_bytes",
Description: "Size distribution of HTTP request body",
Measure: ServerRequestBytes,
Aggregation: DefaultSizeDistribution,
}
ServerResponseBytesView = &view.View{
Name: "opencensus.io/http/server/response_bytes",
Description: "Size distribution of HTTP response body",
Measure: ServerResponseBytes,
Aggregation: DefaultSizeDistribution,
}
ServerLatencyView = &view.View{
Name: "opencensus.io/http/server/latency",
Description: "Latency distribution of HTTP requests",
Measure: ServerLatency,
Aggregation: DefaultLatencyDistribution,
}
ServerRequestCountByMethod = &view.View{
Name: "opencensus.io/http/server/request_count_by_method",
Description: "Server request count by HTTP method",
TagKeys: []tag.Key{Method},
Measure: ServerRequestCount,
Aggregation: view.Count(),
}
ServerResponseCountByStatusCode = &view.View{
Name: "opencensus.io/http/server/response_count_by_status_code",
Description: "Server response count by status code",
TagKeys: []tag.Key{StatusCode},
Measure: ServerLatency,
Aggregation: view.Count(),
}
)
// DefaultClientViews are the default client views provided by this package.
// Deprecated: No replacement. Register the views you would like individually.
var DefaultClientViews = []*view.View{
ClientRequestCountView,
ClientRequestBytesView,
ClientResponseBytesView,
ClientLatencyView,
ClientRequestCountByMethod,
ClientResponseCountByStatusCode,
}
// DefaultServerViews are the default server views provided by this package.
// Deprecated: No replacement. Register the views you would like individually.
var DefaultServerViews = []*view.View{
ServerRequestCountView,
ServerRequestBytesView,
ServerResponseBytesView,
ServerLatencyView,
ServerRequestCountByMethod,
ServerResponseCountByStatusCode,
}
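A hedged registration sketch (not part of the vendored file): the measures above only produce data once views over them are registered; here a few of the non-deprecated convenience views are registered at startup.
package main

import (
	"log"

	"go.opencensus.io/plugin/ochttp"
	"go.opencensus.io/stats/view"
)

func main() {
	if err := view.Register(
		ochttp.ClientSentBytesDistribution,
		ochttp.ClientReceivedBytesDistribution,
		ochttp.ClientRoundtripLatencyDistribution,
		ochttp.ClientCompletedCount,
		ochttp.ServerLatencyView,
	); err != nil {
		log.Fatalf("failed to register ochttp views: %v", err)
	}
}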

239
vendor/go.opencensus.io/plugin/ochttp/trace.go generated vendored Normal file

@ -0,0 +1,239 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ochttp
import (
"io"
"net/http"
"net/http/httptrace"
"go.opencensus.io/plugin/ochttp/propagation/b3"
"go.opencensus.io/trace"
"go.opencensus.io/trace/propagation"
)
// TODO(jbd): Add godoc examples.
var defaultFormat propagation.HTTPFormat = &b3.HTTPFormat{}
// Attributes recorded on the span for the requests.
// Only trace exporters will need them.
const (
HostAttribute = "http.host"
MethodAttribute = "http.method"
PathAttribute = "http.path"
URLAttribute = "http.url"
UserAgentAttribute = "http.user_agent"
StatusCodeAttribute = "http.status_code"
)
type traceTransport struct {
base http.RoundTripper
startOptions trace.StartOptions
format propagation.HTTPFormat
formatSpanName func(*http.Request) string
newClientTrace func(*http.Request, *trace.Span) *httptrace.ClientTrace
}
// TODO(jbd): Add message events for request and response size.
// RoundTrip creates a trace.Span and inserts it into the outgoing request's headers.
// The created span can follow a parent span, if a parent is present in
// the request's context.
func (t *traceTransport) RoundTrip(req *http.Request) (*http.Response, error) {
name := t.formatSpanName(req)
// TODO(jbd): Discuss whether we want to prefix
// outgoing requests with Sent.
ctx, span := trace.StartSpan(req.Context(), name,
trace.WithSampler(t.startOptions.Sampler),
trace.WithSpanKind(trace.SpanKindClient))
if t.newClientTrace != nil {
req = req.WithContext(httptrace.WithClientTrace(ctx, t.newClientTrace(req, span)))
} else {
req = req.WithContext(ctx)
}
if t.format != nil {
// SpanContextToRequest will modify its Request argument, which is
// contrary to the contract for http.RoundTripper, so we need to
// pass it a copy of the Request.
// However, the Request struct itself was already copied by
// the WithContext calls above and so we just need to copy the header.
header := make(http.Header)
for k, v := range req.Header {
header[k] = v
}
req.Header = header
t.format.SpanContextToRequest(span.SpanContext(), req)
}
span.AddAttributes(requestAttrs(req)...)
resp, err := t.base.RoundTrip(req)
if err != nil {
span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()})
span.End()
return resp, err
}
span.AddAttributes(responseAttrs(resp)...)
span.SetStatus(TraceStatus(resp.StatusCode, resp.Status))
// span.End() will be invoked after
// a read from resp.Body returns io.EOF or when
// resp.Body.Close() is invoked.
bt := &bodyTracker{rc: resp.Body, span: span}
resp.Body = wrappedBody(bt, resp.Body)
return resp, err
}
// bodyTracker wraps a response body and invokes
// span.End on encountering io.EOF while reading
// the body of the original response.
type bodyTracker struct {
rc io.ReadCloser
span *trace.Span
}
var _ io.ReadCloser = (*bodyTracker)(nil)
func (bt *bodyTracker) Read(b []byte) (int, error) {
n, err := bt.rc.Read(b)
switch err {
case nil:
return n, nil
case io.EOF:
bt.span.End()
default:
// For all other errors, set the span status
bt.span.SetStatus(trace.Status{
			// Code 2 is trace.StatusCodeUnknown (an unclassified error).
Code: 2,
Message: err.Error(),
})
}
return n, err
}
func (bt *bodyTracker) Close() error {
	// Invoking span.End on Close will help catch the cases
	// in which a read returned a non-nil error and we set the
	// span status but didn't end the span.
bt.span.End()
return bt.rc.Close()
}
// CancelRequest cancels an in-flight request by closing its connection.
func (t *traceTransport) CancelRequest(req *http.Request) {
type canceler interface {
CancelRequest(*http.Request)
}
if cr, ok := t.base.(canceler); ok {
cr.CancelRequest(req)
}
}
func spanNameFromURL(req *http.Request) string {
return req.URL.Path
}
func requestAttrs(r *http.Request) []trace.Attribute {
userAgent := r.UserAgent()
attrs := make([]trace.Attribute, 0, 5)
attrs = append(attrs,
trace.StringAttribute(PathAttribute, r.URL.Path),
trace.StringAttribute(URLAttribute, r.URL.String()),
trace.StringAttribute(HostAttribute, r.Host),
trace.StringAttribute(MethodAttribute, r.Method),
)
if userAgent != "" {
attrs = append(attrs, trace.StringAttribute(UserAgentAttribute, userAgent))
}
return attrs
}
func responseAttrs(resp *http.Response) []trace.Attribute {
return []trace.Attribute{
trace.Int64Attribute(StatusCodeAttribute, int64(resp.StatusCode)),
}
}
// TraceStatus is a utility to convert the HTTP status code to a trace.Status that
// represents the outcome as closely as possible.
func TraceStatus(httpStatusCode int, statusLine string) trace.Status {
var code int32
if httpStatusCode < 200 || httpStatusCode >= 400 {
code = trace.StatusCodeUnknown
}
switch httpStatusCode {
case 499:
code = trace.StatusCodeCancelled
case http.StatusBadRequest:
code = trace.StatusCodeInvalidArgument
case http.StatusGatewayTimeout:
code = trace.StatusCodeDeadlineExceeded
case http.StatusNotFound:
code = trace.StatusCodeNotFound
case http.StatusForbidden:
code = trace.StatusCodePermissionDenied
case http.StatusUnauthorized: // 401 is actually unauthenticated.
code = trace.StatusCodeUnauthenticated
case http.StatusTooManyRequests:
code = trace.StatusCodeResourceExhausted
case http.StatusNotImplemented:
code = trace.StatusCodeUnimplemented
case http.StatusServiceUnavailable:
code = trace.StatusCodeUnavailable
case http.StatusOK:
code = trace.StatusCodeOK
}
return trace.Status{Code: code, Message: codeToStr[code]}
}
var codeToStr = map[int32]string{
trace.StatusCodeOK: `OK`,
trace.StatusCodeCancelled: `CANCELLED`,
trace.StatusCodeUnknown: `UNKNOWN`,
trace.StatusCodeInvalidArgument: `INVALID_ARGUMENT`,
trace.StatusCodeDeadlineExceeded: `DEADLINE_EXCEEDED`,
trace.StatusCodeNotFound: `NOT_FOUND`,
trace.StatusCodeAlreadyExists: `ALREADY_EXISTS`,
trace.StatusCodePermissionDenied: `PERMISSION_DENIED`,
trace.StatusCodeResourceExhausted: `RESOURCE_EXHAUSTED`,
trace.StatusCodeFailedPrecondition: `FAILED_PRECONDITION`,
trace.StatusCodeAborted: `ABORTED`,
trace.StatusCodeOutOfRange: `OUT_OF_RANGE`,
trace.StatusCodeUnimplemented: `UNIMPLEMENTED`,
trace.StatusCodeInternal: `INTERNAL`,
trace.StatusCodeUnavailable: `UNAVAILABLE`,
trace.StatusCodeDataLoss: `DATA_LOSS`,
trace.StatusCodeUnauthenticated: `UNAUTHENTICATED`,
}
func isHealthEndpoint(path string) bool {
// Health checking is pretty frequent and
// traces collected for health endpoints
// can be extremely noisy and expensive.
// Disable canonical health checking endpoints
// like /healthz and /_ah/health for now.
if path == "/healthz" || path == "/_ah/health" {
return true
}
return false
}
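For illustration only (not part of the vendored file): the mapping above turns an HTTP status code into a canonical trace status, e.g. 404 becomes NOT_FOUND.
package main

import (
	"fmt"
	"net/http"

	"go.opencensus.io/plugin/ochttp"
)

func main() {
	st := ochttp.TraceStatus(http.StatusNotFound, http.StatusText(http.StatusNotFound))
	fmt.Println(st.Code, st.Message) // 5 NOT_FOUND
}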

44
vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go generated vendored Normal file

@ -0,0 +1,44 @@
// Copyright 2019, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ochttp
import (
"io"
)
// wrappedBody returns a wrapped version of the original
// Body and only implements the same combination of additional
// interfaces as the original.
func wrappedBody(wrapper io.ReadCloser, body io.ReadCloser) io.ReadCloser {
var (
wr, i0 = body.(io.Writer)
)
switch {
case !i0:
return struct {
io.ReadCloser
}{wrapper}
case i0:
return struct {
io.ReadCloser
io.Writer
}{wrapper, wr}
default:
return struct {
io.ReadCloser
}{wrapper}
}
}

164
vendor/go.opencensus.io/resource/resource.go generated vendored Normal file

@ -0,0 +1,164 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package resource provides functionality for resources, which capture
// identifying information about the entities for which signals are exported.
package resource
import (
"context"
"fmt"
"os"
"regexp"
"sort"
"strconv"
"strings"
)
// Environment variables used by FromEnv to decode a resource.
const (
EnvVarType = "OC_RESOURCE_TYPE"
EnvVarLabels = "OC_RESOURCE_LABELS"
)
// Resource describes an entity about which identifying information and metadata is exposed.
// For example, a type "k8s.io/container" may hold labels describing the pod name and namespace.
type Resource struct {
Type string
Labels map[string]string
}
// EncodeLabels encodes a labels map to a string as provided via the OC_RESOURCE_LABELS environment variable.
func EncodeLabels(labels map[string]string) string {
sortedKeys := make([]string, 0, len(labels))
for k := range labels {
sortedKeys = append(sortedKeys, k)
}
sort.Strings(sortedKeys)
s := ""
for i, k := range sortedKeys {
if i > 0 {
s += ","
}
s += k + "=" + strconv.Quote(labels[k])
}
return s
}
var labelRegex = regexp.MustCompile(`^\s*([[:ascii:]]{1,256}?)=("[[:ascii:]]{0,256}?")\s*,`)
// DecodeLabels decodes a serialized label map as used in the OC_RESOURCE_LABELS variable.
// A list of labels of the form `<key1>="<value1>",<key2>="<value2>",...` is accepted.
// Domain names and paths are accepted as label keys.
// Most users will want to use FromEnv instead.
func DecodeLabels(s string) (map[string]string, error) {
m := map[string]string{}
// Ensure a trailing comma, which allows us to keep the regex simpler
s = strings.TrimRight(strings.TrimSpace(s), ",") + ","
for len(s) > 0 {
match := labelRegex.FindStringSubmatch(s)
if len(match) == 0 {
return nil, fmt.Errorf("invalid label formatting, remainder: %s", s)
}
v := match[2]
if v == "" {
v = match[3]
} else {
var err error
if v, err = strconv.Unquote(v); err != nil {
return nil, fmt.Errorf("invalid label formatting, remainder: %s, err: %s", s, err)
}
}
m[match[1]] = v
s = s[len(match[0]):]
}
return m, nil
}
// FromEnv is a detector that loads resource information from the OC_RESOURCE_TYPE
// and OC_RESOURCE_LABELS environment variables.
func FromEnv(context.Context) (*Resource, error) {
res := &Resource{
Type: strings.TrimSpace(os.Getenv(EnvVarType)),
}
labels := strings.TrimSpace(os.Getenv(EnvVarLabels))
if labels == "" {
return res, nil
}
var err error
if res.Labels, err = DecodeLabels(labels); err != nil {
return nil, err
}
return res, nil
}
var _ Detector = FromEnv
// merge resource information from b into a. In case of a collision, a takes precedence.
func merge(a, b *Resource) *Resource {
if a == nil {
return b
}
if b == nil {
return a
}
res := &Resource{
Type: a.Type,
Labels: map[string]string{},
}
if res.Type == "" {
res.Type = b.Type
}
for k, v := range b.Labels {
res.Labels[k] = v
}
// Labels from resource a overwrite labels from resource b.
for k, v := range a.Labels {
res.Labels[k] = v
}
return res
}
// Detector attempts to detect resource information.
// If the detector cannot find resource information, the returned resource is nil but no
// error is returned.
// An error is only returned on unexpected failures.
type Detector func(context.Context) (*Resource, error)
// MultiDetector returns a Detector that calls all input detectors in order and
// merges each result with the previous one. In case a type or label key is already set,
// the first set value takes precedence.
// It returns on the first error that a sub-detector encounters.
func MultiDetector(detectors ...Detector) Detector {
return func(ctx context.Context) (*Resource, error) {
return detectAll(ctx, detectors...)
}
}
// detectAll calls all input detectors sequentially and merges each result with the previous one.
// It returns on the first error that a sub-detector encounters.
func detectAll(ctx context.Context, detectors ...Detector) (*Resource, error) {
var res *Resource
for _, d := range detectors {
r, err := d(ctx)
if err != nil {
return nil, err
}
res = merge(res, r)
}
return res, nil
}
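A hedged sketch (not part of the vendored file) of the environment-based detection described above; the resource type and label values are placeholders.
package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"go.opencensus.io/resource"
)

func main() {
	os.Setenv("OC_RESOURCE_TYPE", "k8s.io/container")
	os.Setenv("OC_RESOURCE_LABELS", `container.name="app",namespace="default"`)

	res, err := resource.FromEnv(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(res.Type, res.Labels)
}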

69
vendor/go.opencensus.io/stats/doc.go generated vendored Normal file

@ -0,0 +1,69 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
/*
Package stats contains support for OpenCensus stats recording.
OpenCensus allows users to create typed measures, record measurements,
aggregate the collected data, and export the aggregated data.
Measures
A measure represents a type of data point to be tracked and recorded.
For example, latency, request Mb/s, and response Mb/s are measures
to collect from a server.
Measure constructors such as Int64 and Float64 automatically
register the measure by the given name. Each registered measure needs
to be unique by name. Measures also have a description and a unit.
Libraries can define and export measures. Application authors can then
create views and collect and break down measures by the tags they are
interested in.
Recording measurements
Measurement is a data point to be collected for a measure. For example,
for a latency (ms) measure, 100 is a measurement that represents a 100ms
latency event. Measurements are created from measures with
the current context. Tags from the current context are recorded with the
measurements if there are any.
Recorded measurements are dropped immediately if no views are registered for them.
There is usually no need to conditionally enable and disable
recording to reduce cost. Recording of measurements is cheap.
Libraries can always record measurements, and applications can later decide
on which measurements they want to collect by registering views. This allows
libraries to turn on the instrumentation by default.
Exemplars
For a given recorded measurement, the associated exemplar is a diagnostic map
that gives more information about the measurement.
When aggregated using a Distribution aggregation, an exemplar is kept for each
bucket in the Distribution. This allows you to easily find an example of a
measurement that fell into each bucket.
For example, if you also use the OpenCensus trace package and you
record a measurement with a context that contains a sampled trace span,
then the trace span will be added to the exemplar associated with the measurement.
When exported to a supporting back end, you should be able to easily navigate
to example traces that fell into each bucket in the Distribution.
*/
package stats // import "go.opencensus.io/stats"
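An illustrative sketch (not part of the vendored file) of the flow described above: define a measure, register a view over it, and record a measurement. The names, unit, and bucket bounds are placeholders.
package main

import (
	"context"
	"log"

	"go.opencensus.io/stats"
	"go.opencensus.io/stats/view"
)

// latencyMs is a measure registered by name via the stats.Float64 constructor.
var latencyMs = stats.Float64("example.org/measures/latency", "request latency", stats.UnitMilliseconds)

func main() {
	v := &view.View{
		Name:        "example.org/views/latency",
		Description: "latency distribution",
		Measure:     latencyMs,
		Aggregation: view.Distribution(5, 10, 25, 50, 100, 250),
	}
	if err := view.Register(v); err != nil {
		log.Fatal(err)
	}

	// Without a registered view this measurement would be dropped.
	stats.Record(context.Background(), latencyMs.M(17.3))
}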

Some files were not shown because too many files have changed in this diff.