mirror of https://github.com/containous/traefik.git
synced 2025-09-30 17:44:25 +03:00

Compare commits

21 commits:

9de3129a55
40ab1f325c
73e0561610
0a89cccdc0
b102e6de5a
2064a6f805
72e2ddff98
7db41967c6
a8680a8719
7e11fa1193
489b5a6150
8027e8ee23
d35d5bd2e8
8e47bdedc6
51419a9235
32fd52c698
468e4ebd98
92a57384a4
2067a433cd
16f1f851cc
8e992c7cfb
@@ -1,7 +1,7 @@
 #!/usr/bin/env bash
 set -e

-export DOCKER_VERSION=17.03.1
+export DOCKER_VERSION=18.09.7

 source .semaphoreci/vars
CHANGELOG.md (26 changed lines)

@@ -1,5 +1,31 @@
 # Change Log

+## [v1.7.13](https://github.com/containous/traefik/tree/v1.7.13) (2019-08-07)
+[All Commits](https://github.com/containous/traefik/compare/v1.7.12...v1.7.13)
+
+**Bug fixes:**
+- **[acme]** Update lego ([#5166](https://github.com/containous/traefik/pull/5166) by [dabeck](https://github.com/dabeck))
+- **[consulcatalog]** warning should not be a fail status ([#4537](https://github.com/containous/traefik/pull/4537) by [saez0pub](https://github.com/saez0pub))
+- **[docker]** Update docker api version ([#4909](https://github.com/containous/traefik/pull/4909) by [dtomcej](https://github.com/dtomcej))
+- **[dynamodb]** Use dynamodbav tags to override json tags. ([#5002](https://github.com/containous/traefik/pull/5002) by [ldez](https://github.com/ldez))
+- **[healthcheck]** Wrr loadbalancer honors old weight on recovered servers ([#5051](https://github.com/containous/traefik/pull/5051) by [DougWagner](https://github.com/DougWagner))
+- **[k8s]** Check for multiport services on Global Backend Ingress ([#5021](https://github.com/containous/traefik/pull/5021) by [dtomcej](https://github.com/dtomcej))
+- **[logs]** Allows logs to use local time zone instead of UTC ([#4954](https://github.com/containous/traefik/pull/4954) by [dduportal](https://github.com/dduportal))
+- **[middleware]** Clear TLS client headers if TLSMutualAuth is optional ([#4963](https://github.com/containous/traefik/pull/4963) by [stffabi](https://github.com/stffabi))
+- **[tls]** Add missing KeyUsages for default generated certificate ([#5150](https://github.com/containous/traefik/pull/5150) by [dtomcej](https://github.com/dtomcej))
+
+**Documentation:**
+- **[acme]** Fixed doc link for AlibabaCloud ([#5109](https://github.com/containous/traefik/pull/5109) by [ddymko](https://github.com/ddymko))
+- **[docker]** Add example for CLI ([#5131](https://github.com/containous/traefik/pull/5131) by [alvarezbruned](https://github.com/alvarezbruned))
+- **[docker]** Use the latest stable version of traefik in the docs ([#4927](https://github.com/containous/traefik/pull/4927) by [kolaente](https://github.com/kolaente))
+- **[logs]** Update documentation to clarify the default format for logs ([#4953](https://github.com/containous/traefik/pull/4953) by [dduportal](https://github.com/dduportal))
+- **[rancher]** Add remarks about Rancher 2 ([#4999](https://github.com/containous/traefik/pull/4999) by [ldez](https://github.com/ldez))
+- **[tls]** Fixes the TLS Mutual Authentication documentation ([#5085](https://github.com/containous/traefik/pull/5085) by [jbdoumenjou](https://github.com/jbdoumenjou))
+- Format YAML example on user guide ([#5067](https://github.com/containous/traefik/pull/5067) by [gurayyildirim](https://github.com/gurayyildirim))
+- Update Slack support channel references to Discourse community forum ([#5014](https://github.com/containous/traefik/pull/5014) by [dduportal](https://github.com/dduportal))
+- Updating Service Fabric documentation ([#5160](https://github.com/containous/traefik/pull/5160) by [gheibia](https://github.com/gheibia))
+- Improve API / Dashboard wording in documentation ([#4929](https://github.com/containous/traefik/pull/4929) by [dduportal](https://github.com/dduportal))
+
 ## [v1.7.12](https://github.com/containous/traefik/tree/v1.7.12) (2019-05-29)
 [All Commits](https://github.com/containous/traefik/compare/v1.7.11...v1.7.12)
Gopkg.lock (generated, 267 changed lines)

The generated dep lock file records the dependency changes below (digest hashes, pruneopts, and most revision pins are omitted here):

- contrib.go.opencensus.io/exporter/ocagent: added at v0.4.12
- github.com/Azure/azure-sdk-for-go: v15.0.1 -> v31.2.0
- github.com/Azure/go-autorest: v10.6.0 -> v12.4.1 (adds the autorest/azure/auth, autorest/azure/cli, logger, and tracing packages)
- github.com/JamesClonk/vultr: removed
- github.com/akamai/AkamaiOPEN-edgegrid-golang: v0.7.3 -> v0.7.4
- github.com/aliyun/alibaba-cloud-sdk-go: 1.27.7 -> 1.60.84 (adds sdk/auth/credentials/provider)
- github.com/aws/aws-sdk-go: v1.13.54 -> v1.21.9 (adds sub-packages such as aws/credentials/processcreds, aws/crr, and service/sts/stsiface)
- github.com/census-instrumentation/opencensus-proto: v0.2.1, now pulling in additional gen-go agent, metrics, and resource packages
- github.com/cloudflare/cloudflare-go: v0.8.5 -> v0.9.4
- github.com/decker502/dnspod-go: branch master -> v0.2.0
- github.com/dimchansky/utfbom: pinned at v1.1.0
- github.com/dnsimple/dnsimple-go: v0.21.0 -> v0.30.0
- github.com/exoscale/egoscale: v0.14.3 -> v0.18.1
- github.com/go-acme/lego: v2.6.0 -> v2.7.2 (adds providers/dns/namesilo and providers/dns/versio)
- github.com/go-resty/resty: removed
- github.com/gofrs/uuid: added at v3.2.0
- github.com/golang/protobuf: v1.1.0 -> v1.3.2 (adds jsonpb, the protoc-gen-go packages, ptypes/struct, and ptypes/wrappers)
- github.com/grpc-ecosystem/grpc-gateway: added at v1.9.5
- github.com/jmespath/go-jmespath: revision 0b12d6b5 -> c2b33e84
- github.com/json-iterator/go: 1.0.5 -> v1.1.7
- github.com/linode/linodego: v0.5.1 -> v0.10.0
- github.com/mitchellh/go-homedir: branch master -> v1.1.0
- github.com/modern-go/concurrent: added at 1.0.3
- github.com/modern-go/reflect2: added at 1.0.1
- github.com/nrdcg/namesilo: added at v0.2.1
- github.com/oracle/oci-go-sdk: v4.1.0 -> v5.15.0
- github.com/sacloud/libsacloud: v1.21.1 -> v1.26.1
- github.com/satori/go.uuid: removed
- github.com/transip/gotransip: branch master -> v5.14
- github.com/vultr/govultr: added at v0.1.4
- go.opencensus.io: v0.20.2 -> v0.21.0 (adds plugin/ocgrpc and resource)
- golang.org/x/sync: added (semaphore package, branch master)
- google.golang.org/api: adds the support/bundler package
- google.golang.org/genproto: adds googleapis/api/httpbody and protobuf/field_mask
- gopkg.in/resty.v1: added at v1.12.0
- gopkg.in/square/go-jose.v2: v2.1.4 -> v2.3.1
The dep constraints (Gopkg.toml) were updated to match:

@@ -180,7 +180,11 @@
 [[constraint]]
   name = "github.com/go-acme/lego"
-  version = "2.6.0"
+  version = "2.7.2"
+
+[[constraint]]
+  name = "github.com/golang/protobuf"
+  version = "v1.3.0"

 [[constraint]]
   name = "google.golang.org/grpc"
@@ -10,7 +10,7 @@ RUN go get golang.org/x/lint/golint \
     && go get github.com/client9/misspell/cmd/misspell

 # Which docker version to test on
-ARG DOCKER_VERSION=17.03.2
+ARG DOCKER_VERSION=18.09.7
 ARG DEP_VERSION=0.5.1

 # Download go-bindata binary to bin folder in $GOPATH
@@ -25,7 +25,7 @@ RUN mkdir -p /usr/local/bin \

 # Download docker
 RUN mkdir -p /usr/local/bin \
-    && curl -fL https://download.docker.com/linux/static/stable/x86_64/docker-${DOCKER_VERSION}-ce.tgz \
+    && curl -fL https://download.docker.com/linux/static/stable/x86_64/docker-${DOCKER_VERSION}.tgz \
     | tar -xzC /usr/local/bin --transform 's#^.+/##x'

 WORKDIR /go/src/github.com/containous/traefik
@@ -111,6 +111,7 @@ func NewTraefikDefaultPointersConfiguration() *TraefikConfiguration {
     defaultConsulCatalog.Prefix = "traefik"
     defaultConsulCatalog.FrontEndRule = "Host:{{.ServiceName}}.{{.Domain}}"
     defaultConsulCatalog.Stale = false
+    defaultConsulCatalog.StrictChecks = true

     // default Etcd
     var defaultEtcd etcd.Provider
@@ -279,7 +279,7 @@ For example, `CF_API_EMAIL_FILE=/run/secrets/traefik_cf-api-email` could be used
 | Provider Name | Provider Code | Environment Variables | Wildcard & Root Domain Support |
 |---------------|---------------|-----------------------|--------------------------------|
 | [ACME DNS](https://github.com/joohoi/acme-dns) | `acme-dns` | `ACME_DNS_API_BASE`, `ACME_DNS_STORAGE_PATH` | Not tested yet |
-| [Alibaba Cloud](https://www.vultr.com) | `alidns` | `ALICLOUD_ACCESS_KEY`, `ALICLOUD_SECRET_KEY`, `ALICLOUD_REGION_ID` | Not tested yet |
+| [Alibaba Cloud](https://www.alibabacloud.com) | `alidns` | `ALICLOUD_ACCESS_KEY`, `ALICLOUD_SECRET_KEY`, `ALICLOUD_REGION_ID` | Not tested yet |
 | [Auroradns](https://www.pcextreme.com/aurora/dns) | `auroradns` | `AURORA_USER_ID`, `AURORA_KEY`, `AURORA_ENDPOINT` | Not tested yet |
 | [Azure](https://azure.microsoft.com/services/dns/) | `azure` | `AZURE_CLIENT_ID`, `AZURE_CLIENT_SECRET`, `AZURE_SUBSCRIPTION_ID`, `AZURE_TENANT_ID`, `AZURE_RESOURCE_GROUP`, `[AZURE_METADATA_ENDPOINT]` | Not tested yet |
 | [Bindman](https://github.com/labbsr0x/bindman-dns-webhook) | `bindman` | `BINDMAN_MANAGER_ADDRESS` | YES |
@@ -309,13 +309,14 @@ For example, `CF_API_EMAIL_FILE=/run/secrets/traefik_cf-api-email` could be used
 | HTTP request | `httpreq` | `HTTPREQ_ENDPOINT`, `HTTPREQ_MODE`, `HTTPREQ_USERNAME`, `HTTPREQ_PASSWORD` (1) | YES |
 | [IIJ](https://www.iij.ad.jp/) | `iij` | `IIJ_API_ACCESS_KEY`, `IIJ_API_SECRET_KEY`, `IIJ_DO_SERVICE_CODE` | Not tested yet |
 | [INWX](https://www.inwx.de/en) | `inwx` | `INWX_USERNAME`, `INWX_PASSWORD` | YES |
-| [Joker.com](https://joker.com) | `joker` | `JOKER_API_KEY` | YES |
+| [Joker.com](https://joker.com) | `joker` | `JOKER_API_KEY` or `JOKER_USERNAME`, `JOKER_PASSWORD` | YES |
 | [Lightsail](https://aws.amazon.com/lightsail/) | `lightsail` | `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, `DNS_ZONE` | Not tested yet |
 | [Linode](https://www.linode.com) | `linode` | `LINODE_API_KEY` | Not tested yet |
 | [Linode v4](https://www.linode.com) | `linodev4` | `LINODE_TOKEN` | Not tested yet |
 | manual | - | none, but you need to run Traefik interactively, turn on `acmeLogging` to see instructions and press <kbd>Enter</kbd>. | YES |
 | [MyDNS.jp](https://www.mydns.jp/) | `mydnsjp` | `MYDNSJP_MASTER_ID`, `MYDNSJP_PASSWORD` | YES |
 | [Namecheap](https://www.namecheap.com) | `namecheap` | `NAMECHEAP_API_USER`, `NAMECHEAP_API_KEY` | YES |
+| [Namesilo](https://www.namesilo.com/) | `namesilo` | `NAMESILO_API_KEY` | YES |
 | [name.com](https://www.name.com/) | `namedotcom` | `NAMECOM_USERNAME`, `NAMECOM_API_TOKEN`, `NAMECOM_SERVER` | Not tested yet |
 | [Netcup](https://www.netcup.eu/) | `netcup` | `NETCUP_CUSTOMER_NUMBER`, `NETCUP_API_KEY`, `NETCUP_API_PASSWORD` | Not tested yet |
 | [NIFCloud](https://cloud.nifty.com/service/dns.htm) | `nifcloud` | `NIFCLOUD_ACCESS_KEY_ID`, `NIFCLOUD_SECRET_ACCESS_KEY` | Not tested yet |
@@ -333,6 +334,7 @@ For example, `CF_API_EMAIL_FILE=/run/secrets/traefik_cf-api-email` could be used
 | [Stackpath](https://www.stackpath.com/) | `stackpath` | `STACKPATH_CLIENT_ID`, `STACKPATH_CLIENT_SECRET`, `STACKPATH_STACK_ID` | Not tested yet |
 | [TransIP](https://www.transip.nl/) | `transip` | `TRANSIP_ACCOUNT_NAME`, `TRANSIP_PRIVATE_KEY_PATH` | YES |
 | [VegaDNS](https://github.com/shupp/VegaDNS-API) | `vegadns` | `SECRET_VEGADNS_KEY`, `SECRET_VEGADNS_SECRET`, `VEGADNS_URL` | Not tested yet |
+| [Versio](https://www.versio.nl/domeinnamen) | `versio` | `VERSIO_USERNAME`, `VERSIO_PASSWORD` | YES |
 | [Vscale](https://vscale.io/) | `vscale` | `VSCALE_API_TOKEN` | YES |
 | [VULTR](https://www.vultr.com) | `vultr` | `VULTR_API_KEY` | Not tested yet |
 | [Zone.ee](https://www.zone.ee) | `zoneee` | `ZONEEE_API_USER`, `ZONEEE_API_KEY` | YES |
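For context on how a provider from the table above is actually enabled, here is a minimal DNS-challenge sketch in Traefik 1.7 TOML syntax; the `namesilo` provider choice, the e-mail, and the storage path are placeholders for illustration and are not part of this diff:

```toml
# Sketch only: enable the ACME DNS challenge with one provider from the table.
# The provider reads its credentials from the environment variables listed in
# the table (here, NAMESILO_API_KEY would have to be exported before startup).
[acme]
email = "you@example.com"   # placeholder
storage = "acme.json"
entryPoint = "https"
  [acme.dnsChallenge]
  provider = "namesilo"     # provider code from the table
```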
@@ -35,7 +35,7 @@

 For more customization, see [entry points](/configuration/entrypoints/) documentation and the examples below.

-## Web UI
+## Dashboard (Web UI)

 

@@ -322,9 +322,12 @@ curl -s "http://localhost:8080/health" | jq .
 }
 ```

-## Metrics
+## Dashboard Statistics

-You can enable Traefik to export internal metrics to different monitoring systems.
+You can control how the Traefik's internal metrics are shown in the Dashboard.
+
+If you want to export internal metrics to different monitoring systems,
+please check the page [Metrics](./metrics.md).

 ```toml
 [api]
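As a rough illustration of the renamed section, the `[api]` block drives the dashboard and its optional `[api.statistics]` sub-section controls how many recent errors the dashboard keeps; the values below are illustrative and not taken from this diff:

```toml
# Sketch only: dashboard statistics in Traefik 1.7.
[api]
  dashboard = true       # serve the dashboard on the API entry point (default)
  [api.statistics]
  recentErrors = 10      # number of recent errors shown in the dashboard
```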
@@ -37,6 +37,15 @@ stale = false
 #
 domain = "consul.localhost"

+# Keep a Consul node only if all checks status are passing
+# If true, only the Consul nodes with checks status 'passing' will be kept.
+# if false, only the Consul nodes with checks status 'passing' or 'warning' will be kept.
+#
+# Optional
+# Default: true
+#
+strictChecks = true
+
 # Prefix for Consul catalog tags.
 #
 # Optional
@@ -2,6 +2,11 @@

 Traefik can be configured to use Rancher as a provider.

+!!! important
+    This provider is specific to Rancher 1.x.
+    Rancher 2.x requires Kubernetes and does not have a metadata endpoint of its own for Traefik to query.
+    As such, Rancher 2.x users should utilize the [Kubernetes provider](./kubernetes.md) directly.
+
 ## Global Configuration

 ```toml
@@ -69,6 +69,8 @@ Here is an example of an extension setting Traefik labels:
 </StatelessServiceType>
 ```

+> **Note**: The `Label` tag and its `Key` attribute are case sensitive. That is, if you use `label` instead of `Label` or `key` instead of `Key`, they will be silently ignored.
+
 #### Property Manager

 Set Labels with the property manager API to overwrite and add labels, while your service is running.
@@ -235,8 +235,10 @@ If you need to add or remove TLS certificates while Traefik is started, Dynamic
 ## TLS Mutual Authentication

 TLS Mutual Authentication can be `optional` or not.
-If it's `optional`, Traefik will authorize connection with certificates not signed by a specified Certificate Authority (CA).
-Otherwise, Traefik will only accept clients that present a certificate signed by a specified Certificate Authority (CA).
+
+* If `optional = true`, if a certificate is provided, verifies if it is signed by a specified Certificate Authority (CA). Otherwise proceeds without any certificate.
+* If `optional = false`, Traefik will only accept clients that present a certificate signed by a specified Certificate Authority (CA).

 `ClientCAFiles` can be configured with multiple `CA:s` in the same file or use multiple files containing one or several `CA:s`.
 The `CA:s` has to be in PEM format.
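To make the `optional` behaviour described above concrete, a minimal entry point sketch in Traefik 1.7 TOML syntax follows; the certificate paths are placeholders and not part of this diff:

```toml
# Sketch only: TLS mutual authentication on the HTTPS entry point.
[entryPoints]
  [entryPoints.https]
  address = ":443"
    [entryPoints.https.tls]
      [entryPoints.https.tls.ClientCA]
      # one or more PEM files, each of which may contain several CAs
      files = ["ca1.pem", "ca2.pem"]
      # optional = true: verify a client certificate only when one is presented
      # optional = false: reject clients without a certificate signed by one of the CAs
      optional = false
```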
@@ -39,7 +39,7 @@ logLevel = "INFO"

 For more information about the CLI, see the documentation about [Traefik command](/basics/#traefik).

-```shell
+```bash
 --logLevel="DEBUG"
 --traefikLog.filePath="/path/to/traefik.log"
 --traefikLog.format="json"
@@ -54,7 +54,6 @@ For more information about the CLI, see the documentation about [Traefik command
 --accessLog.fields.headers.names="User-Agent=redact Authorization=drop Content-Type=keep"
 ```

-
 ## Traefik Logs

 By default the Traefik log is written to stdout in text format.
@@ -66,7 +65,7 @@ To write the logs into a log file specify the `filePath`:
 filePath = "/path/to/traefik.log"
 ```

-To write JSON format logs, specify `json` as the format:
+To switch to JSON format instead of standard format (`common`), specify `json` as the format:

 ```toml
 [traefikLog]
@@ -74,7 +73,6 @@ To write JSON format logs, specify `json` as the format:
 format = "json"
 ```

-
 Deprecated way (before 1.4):

 !!! danger "DEPRECATED"
@@ -105,11 +103,10 @@ To customize the log level:
 logLevel = "ERROR"
 ```

-
 ## Access Logs

 Access logs are written when `[accessLog]` is defined.
-By default it will write to stdout and produce logs in the textual Common Log Format (CLF), extended with additional fields.
+By default it will write to stdout and produce logs in the textual [Common Log Format (CLF)](#clf-common-log-format), extended with additional fields.

 To enable access logs using the default settings just add the `[accessLog]` entry:
@@ -124,12 +121,12 @@ To write the logs into a log file specify the `filePath`:
 filePath = "/path/to/access.log"
 ```

-To write JSON format logs, specify `json` as the format:
+To switch to JSON format instead of [Common Log Format (CLF)](#clf-common-log-format), specify `json` as the format:

 ```toml
 [accessLog]
 filePath = "/path/to/access.log"
-format = "json"
+format = "json" # Default: "common"
 ```

 To write the logs in async, specify `bufferingSize` as the format (must be >0):
@@ -152,7 +149,7 @@ To filter logs you can specify a set of filters which are logically "OR-connecte
 ```toml
 [accessLog]
 filePath = "/path/to/access.log"
-format = "json"
+format = "json" # Default: "common"

 [accessLog.filters]
@@ -178,12 +175,12 @@ format = "json"
 minDuration = "10ms"
 ```

-To customize logs format:
+To customize logs format, you must switch to the JSON format:

 ```toml
 [accessLog]
 filePath = "/path/to/access.log"
-format = "json"
+format = "json" # Default: "common"

 [accessLog.filters]
@@ -227,7 +224,6 @@ format = "json"
 # ...
 ```

-
 ### List of all available fields

 | Field | Description |
@@ -281,10 +277,9 @@ accessLogsFile = "log/access.log"
 By default, Traefik use the CLF (`common`) as access log format.

 ```html
-<remote_IP_address> - <client_user_name_if_available> [<timestamp>] "<request_method> <request_path> <request_protocol>" <origin_server_HTTP_status> <origin_server_content_size> "<request_referrer>" "<request_user_agent>" <number_of_requests_received_since_Traefik_started> "<Traefik_frontend_name>" "<Traefik_backend_URL>" <request_duration_in_ms>ms 
+<remote_IP_address> - <client_user_name_if_available> [<timestamp>] "<request_method> <request_path> <request_protocol>" <origin_server_HTTP_status> <origin_server_content_size> "<request_referrer>" "<request_user_agent>" <number_of_requests_received_since_Traefik_started> "<Traefik_frontend_name>" "<Traefik_backend_URL>" <request_duration_in_ms>ms
 ```

-
 ## Log Rotation

 Traefik will close and reopen its log files, assuming they're configured, on receipt of a USR1 signal.
@@ -292,3 +287,34 @@ This allows the logs to be rotated and processed by an external program, such as

 !!! note
     This does not work on Windows due to the lack of USR signals.
+
+## Time Zones
+
+The timestamp of each log line is in UTC time by default.
+
+If you want to use local timezone, you need to ensure the 3 following elements:
+
+1. Provide the timezone data into /usr/share/zoneinfo
+2. Set the environement variable TZ to the timezone to be used
+3. Specify the field StartLocal instead of StartUTC (works on default Common Log Format (CLF) as well as JSON)
+
+Example using docker-compose:
+
+```yml
+version: '3'
+
+services:
+  traefik:
+    image: containous/traefik:[latest stable version]
+    ports:
+      - "80:80"
+    environment:
+      - "TZ=US/Alaska"
+    command:
+      - --docker
+      - --accesslog
+      - --accesslog.fields.names="StartUTC=drop"
+    volumes:
+      - "/var/run/docker.sock:/var/run/docker.sock"
+      - "/usr/share/zoneinfo:/usr/share/zoneinfo:ro"
+```
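The same StartUTC/StartLocal switch shown above with CLI flags can also be expressed in the TOML static configuration; this is a sketch only, and the file path is a placeholder:

```toml
# Sketch only: access log in local time via the static TOML configuration.
[accessLog]
filePath = "/path/to/access.log"
format = "common"
  [accessLog.fields]
    [accessLog.fields.names]
    # drop the UTC timestamp so the formatter falls back to StartLocal,
    # which honours the TZ environment variable set on the container
    "StartUTC" = "drop"
```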
@@ -6,7 +6,7 @@
 [](/)
 [](https://goreportcard.com/report/github.com/containous/traefik)
 [](https://github.com/containous/traefik/blob/master/LICENSE.md)
-[](https://slack.traefik.io)
+[](https://community.containo.us/)
 [](https://twitter.com/intent/follow?screen_name=traefik)

 
@@ -91,7 +91,7 @@ To watch docker events, add `--docker.watch`.
 version: "3"
 services:
   traefik:
-    image: traefik:1.5
+    image: traefik:<stable version from https://hub.docker.com/_/traefik>
     command:
       - "--api"
       - "--entrypoints=Name:http Address::80 Redirect.EntryPoint:https"
@@ -156,7 +156,7 @@ The initializer in a docker-compose file will be:

 ```yaml
 traefik_init:
-  image: traefik:1.5
+  image: traefik:<stable version from https://hub.docker.com/_/traefik>
   command:
     - "storeconfig"
     - "--api"
@@ -177,7 +177,7 @@ And now, the Traefik part will only have the Consul configuration.

 ```yaml
 traefik:
-  image: traefik:1.5
+  image: traefik:<stable version from https://hub.docker.com/_/traefik>
   depends_on:
     - traefik_init
     - consul
@@ -200,7 +200,7 @@ The new configuration will be stored in Consul, and you need to restart the Trae
 version: "3.4"
 services:
   traefik_init:
-    image: traefik:1.5
+    image: traefik:<stable version from https://hub.docker.com/_/traefik>
     command:
       - "storeconfig"
       - "--api"
@@ -229,7 +229,7 @@ services:
     depends_on:
       - consul
   traefik:
-    image: traefik:1.5
+    image: traefik:<stable version from https://hub.docker.com/_/traefik>
     depends_on:
       - traefik_init
       - consul
@@ -50,7 +50,7 @@ version: '2'

 services:
   traefik:
-    image: traefik:1.5.4
+    image: traefik:<stable version from https://hub.docker.com/_/traefik>
     restart: always
     ports:
       - 80:80
@@ -108,6 +108,28 @@ onHostRule = true
 entryPoint = "http"
 ```

+Alternatively, the `TOML` file above can also be translated into command line switches.
+This is the `command` value of the `traefik` service in the `docker-compose.yml` manifest:
+
+```yaml
+command:
+  - --debug=false
+  - --logLevel=ERROR
+  - --defaultentrypoints=https,http
+  - --entryPoints=Name:http Address::80 Redirect.EntryPoint:https
+  - --entryPoints=Name:https Address::443 TLS
+  - --retry
+  - --docker.endpoint=unix:///var/run/docker.sock
+  - --docker.domain=my-awesome-app.org
+  - --docker.watch=true
+  - --docker.exposedbydefault=false
+  - --acme.email=your-email-here@my-awesome-app.org
+  - --acme.storage=acme.json
+  - --acme.entryPoint=https
+  - --acme.onHostRule=true
+  - --acme.httpchallenge.entrypoint=http
+```
+
 This is the minimum configuration required to do the following:

 - Log `ERROR`-level messages (or more severe) to the console, but silence `DEBUG`-level messages
@@ -336,37 +336,37 @@ Pay attention to the **labels** section:

(This hunk only re-indents the `home` service example; its content is unchanged:)

```
home:
  image: abiosoft/caddy:0.10.14
  networks:
    - ntw_front
  volumes:
    - ./www/home/srv/:/srv/
  deploy:
    mode: replicated
    replicas: 2
    #placement:
    #  constraints: [node.role==manager]
    restart_policy:
      condition: on-failure
      max_attempts: 5
    resources:
      limits:
        cpus: '0.20'
        memory: 9M
      reservations:
        cpus: '0.05'
        memory: 9M
    labels:
      - "traefik.frontend.rule=PathPrefixStrip:/"
      - "traefik.backend=home"
      - "traefik.port=2015"
      - "traefik.weight=10"
      - "traefik.enable=true"
      - "traefik.passHostHeader=true"
      - "traefik.docker.network=ntw_front"
      - "traefik.frontend.entryPoints=http"
      - "traefik.backend.loadbalancer.swarm=true"
      - "traefik.backend.loadbalancer.method=drr"
```

Something more tricky using `regex`.

@@ -377,39 +377,39 @@ The double sign `$$` are variables managed by the docker compose file ([document

(Likewise, this hunk only re-indents the `portainer` service example:)

```
portainer:
  image: portainer/portainer:1.16.5
  networks:
    - ntw_front
  volumes:
    - /var/run/docker.sock:/var/run/docker.sock
  deploy:
    mode: replicated
    replicas: 1
    placement:
      constraints: [node.role==manager]
    restart_policy:
      condition: on-failure
      max_attempts: 5
    resources:
      limits:
        cpus: '0.33'
        memory: 20M
      reservations:
        cpus: '0.05'
        memory: 10M
    labels:
      - "traefik.frontend.rule=PathPrefixStrip:/portainer"
      - "traefik.backend=portainer"
      - "traefik.port=9000"
      - "traefik.weight=10"
      - "traefik.enable=true"
      - "traefik.passHostHeader=true"
      - "traefik.docker.network=ntw_front"
      - "traefik.frontend.entryPoints=http"
      - "traefik.backend.loadbalancer.swarm=true"
      - "traefik.backend.loadbalancer.method=drr"
      # https://github.com/containous/traefik/issues/563#issuecomment-421360934
      - "traefik.frontend.redirect.regex=^(.*)/portainer$$"
      - "traefik.frontend.redirect.replacement=$$1/portainer/"
      - "traefik.frontend.rule=PathPrefix:/portainer;ReplacePathRegex: ^/portainer/(.*) /$$1"
```
@@ -139,7 +139,7 @@ Here is the [docker-compose file](https://docs.docker.com/compose/compose-file/)

 ```yaml
 traefik:
-  image: traefik
+  image: traefik:<stable version from https://hub.docker.com/_/traefik>
   command: --consul --consul.endpoint=127.0.0.1:8500
   ports:
     - "80:80"
@@ -85,7 +85,7 @@ docker-machine ssh manager "docker service create \
     --publish 80:80 --publish 8080:8080 \
     --mount type=bind,source=/var/run/docker.sock,target=/var/run/docker.sock \
     --network traefik-net \
-    traefik \
+    traefik:<stable version from https://hub.docker.com/_/traefik> \
     --docker \
     --docker.swarmMode \
     --docker.domain=traefik \
@@ -81,7 +81,7 @@ docker $(docker-machine config mhs-demo0) run \
     -p 80:80 -p 8080:8080 \
     --net=my-net \
     -v /var/lib/boot2docker/:/ssl \
-    traefik \
+    traefik:<stable version from https://hub.docker.com/_/traefik> \
     -l DEBUG \
     -c /dev/null \
     --docker \
@@ -49,11 +49,16 @@ func (opt Options) String() string {
     return fmt.Sprintf("[Hostname: %s Headers: %v Path: %s Port: %d Interval: %s]", opt.Hostname, opt.Headers, opt.Path, opt.Port, opt.Interval)
 }

+type backendURL struct {
+    url    *url.URL
+    weight int
+}
+
 // BackendConfig HealthCheck configuration for a backend
 type BackendConfig struct {
     Options
     name           string
-    disabledURLs   []*url.URL
+    disabledURLs   []backendURL
     requestTimeout time.Duration
 }

@@ -129,18 +134,18 @@ func (hc *HealthCheck) execute(ctx context.Context, backend *BackendConfig) {

 func (hc *HealthCheck) checkBackend(backend *BackendConfig) {
     enabledURLs := backend.LB.Servers()
-    var newDisabledURLs []*url.URL
-    for _, url := range backend.disabledURLs {
+    var newDisabledURLs []backendURL
+    for _, backendurl := range backend.disabledURLs {
         serverUpMetricValue := float64(0)
-        if err := checkHealth(url, backend); err == nil {
-            log.Warnf("Health check up: Returning to server list. Backend: %q URL: %q", backend.name, url.String())
-            backend.LB.UpsertServer(url, roundrobin.Weight(1))
+        if err := checkHealth(backendurl.url, backend); err == nil {
+            log.Warnf("Health check up: Returning to server list. Backend: %q URL: %q Weight: %d", backend.name, backendurl.url.String(), backendurl.weight)
+            backend.LB.UpsertServer(backendurl.url, roundrobin.Weight(backendurl.weight))
             serverUpMetricValue = 1
         } else {
-            log.Warnf("Health check still failing. Backend: %q URL: %q Reason: %s", backend.name, url.String(), err)
-            newDisabledURLs = append(newDisabledURLs, url)
+            log.Warnf("Health check still failing. Backend: %q URL: %q Reason: %s", backend.name, backendurl.url.String(), err)
+            newDisabledURLs = append(newDisabledURLs, backendurl)
         }
-        labelValues := []string{"backend", backend.name, "url", url.String()}
+        labelValues := []string{"backend", backend.name, "url", backendurl.url.String()}
         hc.metrics.BackendServerUpGauge().With(labelValues...).Set(serverUpMetricValue)
     }
     backend.disabledURLs = newDisabledURLs
@@ -148,9 +153,18 @@ func (hc *HealthCheck) checkBackend(backend *BackendConfig) {
     for _, url := range enabledURLs {
         serverUpMetricValue := float64(1)
         if err := checkHealth(url, backend); err != nil {
-            log.Warnf("Health check failed: Remove from server list. Backend: %q URL: %q Reason: %s", backend.name, url.String(), err)
+            weight := 1
+            rr, ok := backend.LB.(*roundrobin.RoundRobin)
+            if ok {
+                var gotWeight bool
+                weight, gotWeight = rr.ServerWeight(url)
+                if !gotWeight {
+                    weight = 1
+                }
+            }
+            log.Warnf("Health check failed: Remove from server list. Backend: %q URL: %q Weight: %d Reason: %s", backend.name, url.String(), weight, err)
             backend.LB.RemoveServer(url)
-            backend.disabledURLs = append(backend.disabledURLs, url)
+            backend.disabledURLs = append(backend.disabledURLs, backendURL{url, weight})
             serverUpMetricValue = 0
         }
         labelValues := []string{"backend", backend.name, "url", url.String()}
@@ -112,7 +112,7 @@ func TestSetBackendsConfiguration(t *testing.T) {
             if test.startHealthy {
                 lb.servers = append(lb.servers, serverURL)
             } else {
-                backend.disabledURLs = append(backend.disabledURLs, serverURL)
+                backend.disabledURLs = append(backend.disabledURLs, backendURL{serverURL, 1})
             }

             collectingMetrics := testhelpers.NewCollectingHealthCheckMetrics()
@@ -24,6 +24,8 @@ func (f *CommonLogFormatter) Format(entry *logrus.Entry) ([]byte, error) {
     var timestamp = defaultValue
     if v, ok := entry.Data[StartUTC]; ok {
         timestamp = v.(time.Time).Format(commonLogTimeFormat)
+    } else if v, ok := entry.Data[StartLocal]; ok {
+        timestamp = v.(time.Time).Local().Format(commonLogTimeFormat)
     }

     var elapsedMillis int64
@@ -2,6 +2,7 @@ package accesslog

 import (
     "net/http"
+    "os"
     "testing"
     "time"

@@ -57,10 +58,34 @@ func TestCommonLogFormatter_Format(t *testing.T) {
                 BackendURL:             "http://10.0.0.2/toto",
             },
             expectedLog: `10.0.0.1 - Client [10/Nov/2009:23:00:00 +0000] "GET /foo http" 123 132 "referer" "agent" - "foo" "http://10.0.0.2/toto" 123000ms
 `,
         },
+        {
+            name: "all data with local time",
+            data: map[string]interface{}{
+                StartLocal:             time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
+                Duration:               123 * time.Second,
+                ClientHost:             "10.0.0.1",
+                ClientUsername:         "Client",
+                RequestMethod:          http.MethodGet,
+                RequestPath:            "/foo",
+                RequestProtocol:        "http",
+                OriginStatus:           123,
+                OriginContentSize:      132,
+                RequestRefererHeader:   "referer",
+                RequestUserAgentHeader: "agent",
+                RequestCount:           nil,
+                FrontendName:           "foo",
+                BackendURL:             "http://10.0.0.2/toto",
+            },
+            expectedLog: `10.0.0.1 - Client [10/Nov/2009:14:00:00 -0900] "GET /foo http" 123 132 "referer" "agent" - "foo" "http://10.0.0.2/toto" 123000ms
+`,
+        },
     }

+    // Set timezone to Alaska to have a constant behavior
+    os.Setenv("TZ", "US/Alaska")
+
     for _, test := range testCases {
         test := test
         t.Run(test.name, func(t *testing.T) {
@@ -44,7 +44,7 @@ type DistinguishedNameOptions struct {

 // TLSClientHeaders is a middleware that helps setup a few tls info features.
 type TLSClientHeaders struct {
-    Infos *TLSClientCertificateInfos // pass selected informations from the client certificate
+    Infos *TLSClientCertificateInfos // pass selected information from the client certificate
     PEM   bool                       // pass the sanitized pem to the backend in a specific header
 }

@@ -79,23 +79,14 @@ func newTLSClientInfos(infos *types.TLSClientCertificateInfos) *TLSClientCertifi
 }

 // NewTLSClientHeaders constructs a new TLSClientHeaders instance from supplied frontend header struct.
-func NewTLSClientHeaders(frontend *types.Frontend) *TLSClientHeaders {
-    if frontend == nil {
+func NewTLSClientHeaders(passTLSClientCert *types.TLSClientHeaders) *TLSClientHeaders {
+    if passTLSClientCert == nil {
         return nil
     }

-    var addPEM bool
-    var infos *TLSClientCertificateInfos
-
-    if frontend.PassTLSClientCert != nil {
-        conf := frontend.PassTLSClientCert
-        addPEM = conf.PEM
-        infos = newTLSClientInfos(conf.Infos)
-    }
-
     return &TLSClientHeaders{
-        Infos: infos,
-        PEM:   addPEM,
+        Infos: newTLSClientInfos(passTLSClientCert.Infos),
+        PEM:   passTLSClientCert.PEM,
     }
 }

@@ -221,7 +212,7 @@ func writePart(content *strings.Builder, entry string, prefix string) {
     }
 }

-// getXForwardedTLSClientCertInfo Build a string with the wanted client certificates informations
+// getXForwardedTLSClientCertInfo Build a string with the wanted client certificates information
 // like Subject="DC=%s,C=%s,ST=%s,L=%s,O=%s,CN=%s",NB=%d,NA=%d,SAN=%s;
 func (s *TLSClientHeaders) getXForwardedTLSClientCertInfo(certs []*x509.Certificate) string {
     var headerValues []string
@@ -268,8 +259,9 @@ func (s *TLSClientHeaders) getXForwardedTLSClientCertInfo(certs []*x509.Certific
     return strings.Join(headerValues, ";")
 }

-// ModifyRequestHeaders set the wanted headers with the certificates informations
+// ModifyRequestHeaders set the wanted headers with the certificates information
 func (s *TLSClientHeaders) ModifyRequestHeaders(r *http.Request) {
+    r.Header.Del(xForwardedTLSClientCert)
     if s.PEM {
         if r.TLS != nil && len(r.TLS.PeerCertificates) > 0 {
             r.Header.Set(xForwardedTLSClientCert, getXForwardedTLSClientCert(r.TLS.PeerCertificates))
@@ -278,6 +270,7 @@ func (s *TLSClientHeaders) ModifyRequestHeaders(r *http.Request) {
         }
     }

+    r.Header.Del(xForwardedTLSClientCertInfos)
     if s.Infos != nil {
         if r.TLS != nil && len(r.TLS.PeerCertificates) > 0 {
             headerContent := s.getXForwardedTLSClientCertInfo(r.TLS.PeerCertificates)
@@ -240,26 +240,6 @@ mxcl71pV8i3NDU3kgVi2440JYpoMveTlXPCV2svHNCw0X238YHsSW4b93yGJO0gI
|
||||
ML9n/4zmm1PMhzZHcEA72ZAq0tKCxpz10djg5v2qL5V+Oaz8TtTOZbPsxpiKMQ==
|
||||
-----END CERTIFICATE-----
|
||||
`
|
||||
|
||||
minimalCert = `-----BEGIN CERTIFICATE-----
|
||||
MIIDGTCCAgECCQCqLd75YLi2kDANBgkqhkiG9w0BAQsFADBYMQswCQYDVQQGEwJG
|
||||
UjETMBEGA1UECAwKU29tZS1TdGF0ZTERMA8GA1UEBwwIVG91bG91c2UxITAfBgNV
|
||||
BAoMGEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0xODA3MTgwODI4MTZaFw0x
|
||||
ODA4MTcwODI4MTZaMEUxCzAJBgNVBAYTAkZSMRMwEQYDVQQIDApTb21lLVN0YXRl
|
||||
MSEwHwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggEiMA0GCSqGSIb3
|
||||
DQEBAQUAA4IBDwAwggEKAoIBAQC/+frDMMTLQyXG34F68BPhQq0kzK4LIq9Y0/gl
|
||||
FjySZNn1C0QDWA1ubVCAcA6yY204I9cxcQDPNrhC7JlS5QA8Y5rhIBrqQlzZizAi
|
||||
Rj3NTrRjtGUtOScnHuJaWjLy03DWD+aMwb7q718xt5SEABmmUvLwQK+EjW2MeDwj
|
||||
y8/UEIpvrRDmdhGaqv7IFpIDkcIF7FowJ/hwDvx3PMc+z/JWK0ovzpvgbx69AVbw
|
||||
ZxCimeha65rOqVi+lEetD26le+WnOdYsdJ2IkmpPNTXGdfb15xuAc+gFXfMCh7Iw
|
||||
3Ynl6dZtZM/Ok2kiA7/OsmVnRKkWrtBfGYkI9HcNGb3zrk6nAgMBAAEwDQYJKoZI
|
||||
hvcNAQELBQADggEBAC/R+Yvhh1VUhcbK49olWsk/JKqfS3VIDQYZg1Eo+JCPbwgS
|
||||
I1BSYVfMcGzuJTX6ua3m/AHzGF3Tap4GhF4tX12jeIx4R4utnjj7/YKkTvuEM2f4
|
||||
xT56YqI7zalGScIB0iMeyNz1QcimRl+M/49au8ow9hNX8C2tcA2cwd/9OIj/6T8q
|
||||
SBRHc6ojvbqZSJCO0jziGDT1L3D+EDgTjED4nd77v/NRdP+egb0q3P0s4dnQ/5AV
|
||||
aQlQADUn61j3ScbGJ4NSeZFFvsl38jeRi/MEzp0bGgNBcPj6JHi7qbbauZcZfQ05
|
||||
jECvgAY7Nfd9mZ1KtyNaW31is+kag7NsvjxU/kM=
|
||||
-----END CERTIFICATE-----`
|
||||
)
|
||||
|
||||
func getCleanCertContents(certContents []string) string {
|
||||
@@ -303,10 +283,6 @@ func buildTLSWith(certContents []string) *tls.ConnectionState {
|
||||
return &tls.ConnectionState{PeerCertificates: peerCertificates}
|
||||
}
|
||||
|
||||
var myPassTLSClientCustomHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Write([]byte("bar"))
|
||||
})
|
||||
|
||||
func getExpectedSanitized(s string) string {
|
||||
return url.QueryEscape(strings.Replace(s, "\n", "", -1))
|
||||
}
|
||||
@@ -360,20 +336,13 @@ WqeUSNGYV//RunTeuRDAf5OxehERb1srzBXhRZ3cZdzXbgR/`),
|
||||
|
||||
}
|
||||
|
||||
func TestTlsClientheadersWithPEM(t *testing.T) {
|
||||
func TestTlsClientHeadersWithPEM(t *testing.T) {
|
||||
testCases := []struct {
|
||||
desc string
|
||||
certContents []string // set the request TLS attribute if defined
|
||||
tlsClientCertHeaders *types.TLSClientHeaders
|
||||
expectedHeader string
|
||||
}{
|
||||
{
|
||||
desc: "No TLS, no option",
|
||||
},
|
||||
{
|
||||
desc: "TLS, no option",
|
||||
certContents: []string{minimalCheeseCrt},
|
||||
},
|
||||
{
|
||||
desc: "No TLS, with pem option true",
|
||||
tlsClientCertHeaders: &types.TLSClientHeaders{PEM: true},
|
||||
@@ -399,20 +368,24 @@ func TestTlsClientheadersWithPEM(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
tlsClientHeaders := NewTLSClientHeaders(&types.Frontend{PassTLSClientCert: test.tlsClientCertHeaders})
|
||||
|
||||
res := httptest.NewRecorder()
|
||||
req := testhelpers.MustNewRequest(http.MethodGet, "http://example.com/foo", nil)
|
||||
|
||||
if test.certContents != nil && len(test.certContents) > 0 {
|
||||
req.TLS = buildTLSWith(test.certContents)
|
||||
}
|
||||
|
||||
tlsClientHeaders.ServeHTTP(res, req, myPassTLSClientCustomHandler)
|
||||
|
||||
test := test
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tlsClientHeaders := NewTLSClientHeaders(test.tlsClientCertHeaders)
|
||||
|
||||
res := httptest.NewRecorder()
|
||||
req := testhelpers.MustNewRequest(http.MethodGet, "http://example.com/foo", nil)
|
||||
|
||||
if test.tlsClientCertHeaders != nil {
|
||||
req.Header.Set(xForwardedTLSClientCert, "Unsanitized HEADER")
|
||||
}
|
||||
|
||||
if test.certContents != nil && len(test.certContents) > 0 {
|
||||
req.TLS = buildTLSWith(test.certContents)
|
||||
}
|
||||
|
||||
tlsClientHeaders.ServeHTTP(res, req, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Write([]byte("bar"))
|
||||
}))
|
||||
|
||||
require.Equal(t, http.StatusOK, res.Code, "Http Status should be OK")
|
||||
require.Equal(t, "bar", res.Body.String(), "Should be the expected body")
|
||||
@@ -477,7 +450,7 @@ func TestGetSans(t *testing.T) {
|
||||
|
||||
}
|
||||
|
||||
func TestTlsClientheadersWithCertInfos(t *testing.T) {
|
||||
func TestTlsClientHeadersWithCertInfos(t *testing.T) {
|
||||
minimalCheeseCertAllInfos := `Subject="C=FR,ST=Some-State,O=Cheese",Issuer="DC=org,DC=cheese,C=FR,C=US,ST=Signing State,ST=Signing State 2,L=TOULOUSE,L=LYON,O=Cheese,O=Cheese 2,CN=Simple Signing CA 2",NB=1544094636,NA=1632568236,SAN=`
|
||||
completeCertAllInfos := `Subject="DC=org,DC=cheese,C=FR,C=US,ST=Cheese org state,ST=Cheese com state,L=TOULOUSE,L=LYON,O=Cheese,O=Cheese 2,CN=*.cheese.com",Issuer="DC=org,DC=cheese,C=FR,C=US,ST=Signing State,ST=Signing State 2,L=TOULOUSE,L=LYON,O=Cheese,O=Cheese 2,CN=Simple Signing CA 2",NB=1544094616,NA=1607166616,SAN=*.cheese.org,*.cheese.net,*.cheese.com,test@cheese.org,test@cheese.net,10.0.1.0,10.0.1.2`
|
||||
|
||||
@@ -487,13 +460,6 @@ func TestTlsClientheadersWithCertInfos(t *testing.T) {
|
||||
tlsClientCertHeaders *types.TLSClientHeaders
|
||||
expectedHeader string
|
||||
}{
|
||||
{
|
||||
desc: "No TLS, no option",
|
||||
},
|
||||
{
|
||||
desc: "TLS, no option",
|
||||
certContents: []string{minimalCert},
|
||||
},
|
||||
{
|
||||
desc: "No TLS, with pem option true",
|
||||
tlsClientCertHeaders: &types.TLSClientHeaders{
|
||||
@@ -627,20 +593,24 @@ func TestTlsClientheadersWithCertInfos(t *testing.T) {
|
||||
},
|
||||
}
|
||||
for _, test := range testCases {
|
||||
tlsClientHeaders := NewTLSClientHeaders(&types.Frontend{PassTLSClientCert: test.tlsClientCertHeaders})
|
||||
|
||||
res := httptest.NewRecorder()
|
||||
req := testhelpers.MustNewRequest(http.MethodGet, "http://example.com/foo", nil)
|
||||
|
||||
if test.certContents != nil && len(test.certContents) > 0 {
|
||||
req.TLS = buildTLSWith(test.certContents)
|
||||
}
|
||||
|
||||
tlsClientHeaders.ServeHTTP(res, req, myPassTLSClientCustomHandler)
|
||||
|
||||
test := test
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tlsClientHeaders := NewTLSClientHeaders(test.tlsClientCertHeaders)
|
||||
|
||||
res := httptest.NewRecorder()
|
||||
req := testhelpers.MustNewRequest(http.MethodGet, "http://example.com/foo", nil)
|
||||
|
||||
if test.tlsClientCertHeaders != nil {
|
||||
req.Header.Set(xForwardedTLSClientCertInfos, "Unsanitized HEADER")
|
||||
}
|
||||
|
||||
if test.certContents != nil && len(test.certContents) > 0 {
|
||||
req.TLS = buildTLSWith(test.certContents)
|
||||
}
|
||||
|
||||
tlsClientHeaders.ServeHTTP(res, req, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Write([]byte("bar"))
|
||||
}))
|
||||
|
||||
require.Equal(t, http.StatusOK, res.Code, "Http Status should be OK")
|
||||
require.Equal(t, "bar", res.Body.String(), "Should be the expected body")
|
||||
@@ -664,45 +634,31 @@ func TestTlsClientheadersWithCertInfos(t *testing.T) {
|
||||
|
||||
func TestNewTLSClientHeadersFromStruct(t *testing.T) {
|
||||
testCases := []struct {
|
||||
desc string
|
||||
frontend *types.Frontend
|
||||
expected *TLSClientHeaders
|
||||
desc string
|
||||
tlsClientHeaders *types.TLSClientHeaders
|
||||
expected *TLSClientHeaders
|
||||
}{
|
||||
{
|
||||
desc: "Without frontend",
|
||||
},
|
||||
{
|
||||
desc: "frontend without the option",
|
||||
frontend: &types.Frontend{},
|
||||
expected: &TLSClientHeaders{},
|
||||
},
|
||||
{
|
||||
desc: "frontend with the pem set false",
|
||||
frontend: &types.Frontend{
|
||||
PassTLSClientCert: &types.TLSClientHeaders{
|
||||
PEM: false,
|
||||
},
|
||||
desc: "TLS client headers with the pem set false",
|
||||
tlsClientHeaders: &types.TLSClientHeaders{
|
||||
PEM: false,
|
||||
},
|
||||
expected: &TLSClientHeaders{PEM: false},
|
||||
},
|
||||
{
|
||||
desc: "frontend with the pem set true",
|
||||
frontend: &types.Frontend{
|
||||
PassTLSClientCert: &types.TLSClientHeaders{
|
||||
PEM: true,
|
||||
},
|
||||
desc: "TLS client headers with the pem set true",
|
||||
tlsClientHeaders: &types.TLSClientHeaders{
|
||||
PEM: true,
|
||||
},
|
||||
expected: &TLSClientHeaders{PEM: true},
|
||||
},
|
||||
{
|
||||
desc: "frontend with the Infos with no flag",
|
||||
frontend: &types.Frontend{
|
||||
PassTLSClientCert: &types.TLSClientHeaders{
|
||||
Infos: &types.TLSClientCertificateInfos{
|
||||
NotAfter: false,
|
||||
NotBefore: false,
|
||||
Sans: false,
|
||||
},
|
||||
desc: "TLS client headers with the Infos with no flag",
|
||||
tlsClientHeaders: &types.TLSClientHeaders{
|
||||
Infos: &types.TLSClientCertificateInfos{
|
||||
NotAfter: false,
|
||||
NotBefore: false,
|
||||
Sans: false,
|
||||
},
|
||||
},
|
||||
expected: &TLSClientHeaders{
|
||||
@@ -711,14 +667,12 @@ func TestNewTLSClientHeadersFromStruct(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "frontend with the Infos basic",
|
||||
frontend: &types.Frontend{
|
||||
PassTLSClientCert: &types.TLSClientHeaders{
|
||||
Infos: &types.TLSClientCertificateInfos{
|
||||
NotAfter: true,
|
||||
NotBefore: true,
|
||||
Sans: true,
|
||||
},
|
||||
desc: "TLS client headers with the Infos basic",
|
||||
tlsClientHeaders: &types.TLSClientHeaders{
|
||||
Infos: &types.TLSClientCertificateInfos{
|
||||
NotAfter: true,
|
||||
NotBefore: true,
|
||||
Sans: true,
|
||||
},
|
||||
},
|
||||
expected: &TLSClientHeaders{
|
||||
@@ -731,12 +685,10 @@ func TestNewTLSClientHeadersFromStruct(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "frontend with the Infos NotAfter",
|
||||
frontend: &types.Frontend{
|
||||
PassTLSClientCert: &types.TLSClientHeaders{
|
||||
Infos: &types.TLSClientCertificateInfos{
|
||||
NotAfter: true,
|
||||
},
|
||||
desc: "TLS client headers with the Infos NotAfter",
|
||||
tlsClientHeaders: &types.TLSClientHeaders{
|
||||
Infos: &types.TLSClientCertificateInfos{
|
||||
NotAfter: true,
|
||||
},
|
||||
},
|
||||
expected: &TLSClientHeaders{
|
||||
@@ -747,12 +699,10 @@ func TestNewTLSClientHeadersFromStruct(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "frontend with the Infos NotBefore",
|
||||
frontend: &types.Frontend{
|
||||
PassTLSClientCert: &types.TLSClientHeaders{
|
||||
Infos: &types.TLSClientCertificateInfos{
|
||||
NotBefore: true,
|
||||
},
|
||||
desc: "TLS client headers with the Infos NotBefore",
|
||||
tlsClientHeaders: &types.TLSClientHeaders{
|
||||
Infos: &types.TLSClientCertificateInfos{
|
||||
NotBefore: true,
|
||||
},
|
||||
},
|
||||
expected: &TLSClientHeaders{
|
||||
@@ -763,12 +713,10 @@ func TestNewTLSClientHeadersFromStruct(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "frontend with the Infos Sans",
|
||||
frontend: &types.Frontend{
|
||||
PassTLSClientCert: &types.TLSClientHeaders{
|
||||
Infos: &types.TLSClientCertificateInfos{
|
||||
Sans: true,
|
||||
},
|
||||
desc: "TLS client headers with the Infos Sans",
|
||||
tlsClientHeaders: &types.TLSClientHeaders{
|
||||
Infos: &types.TLSClientCertificateInfos{
|
||||
Sans: true,
|
||||
},
|
||||
},
|
||||
expected: &TLSClientHeaders{
|
||||
@@ -779,13 +727,11 @@ func TestNewTLSClientHeadersFromStruct(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "frontend with the Infos Subject Organization",
|
||||
frontend: &types.Frontend{
|
||||
PassTLSClientCert: &types.TLSClientHeaders{
|
||||
Infos: &types.TLSClientCertificateInfos{
|
||||
Subject: &types.TLSCLientCertificateDNInfos{
|
||||
Organization: true,
|
||||
},
|
||||
desc: "TLS client headers with the Infos Subject Organization",
|
||||
tlsClientHeaders: &types.TLSClientHeaders{
|
||||
Infos: &types.TLSClientCertificateInfos{
|
||||
Subject: &types.TLSCLientCertificateDNInfos{
|
||||
Organization: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -799,13 +745,11 @@ func TestNewTLSClientHeadersFromStruct(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "frontend with the Infos Subject Country",
|
||||
frontend: &types.Frontend{
|
||||
PassTLSClientCert: &types.TLSClientHeaders{
|
||||
Infos: &types.TLSClientCertificateInfos{
|
||||
Subject: &types.TLSCLientCertificateDNInfos{
|
||||
Country: true,
|
||||
},
|
||||
desc: "TLS client headers with the Infos Subject Country",
|
||||
tlsClientHeaders: &types.TLSClientHeaders{
|
||||
Infos: &types.TLSClientCertificateInfos{
|
||||
Subject: &types.TLSCLientCertificateDNInfos{
|
||||
Country: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -819,13 +763,11 @@ func TestNewTLSClientHeadersFromStruct(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "frontend with the Infos Subject SerialNumber",
|
||||
frontend: &types.Frontend{
|
||||
PassTLSClientCert: &types.TLSClientHeaders{
|
||||
Infos: &types.TLSClientCertificateInfos{
|
||||
Subject: &types.TLSCLientCertificateDNInfos{
|
||||
SerialNumber: true,
|
||||
},
|
||||
desc: "TLS client headers with the Infos Subject SerialNumber",
|
||||
tlsClientHeaders: &types.TLSClientHeaders{
|
||||
Infos: &types.TLSClientCertificateInfos{
|
||||
Subject: &types.TLSCLientCertificateDNInfos{
|
||||
SerialNumber: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -839,13 +781,11 @@ func TestNewTLSClientHeadersFromStruct(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "frontend with the Infos Subject Province",
|
||||
frontend: &types.Frontend{
|
||||
PassTLSClientCert: &types.TLSClientHeaders{
|
||||
Infos: &types.TLSClientCertificateInfos{
|
||||
Subject: &types.TLSCLientCertificateDNInfos{
|
||||
Province: true,
|
||||
},
|
||||
desc: "TLS client headers with the Infos Subject Province",
|
||||
tlsClientHeaders: &types.TLSClientHeaders{
|
||||
Infos: &types.TLSClientCertificateInfos{
|
||||
Subject: &types.TLSCLientCertificateDNInfos{
|
||||
Province: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -859,13 +799,11 @@ func TestNewTLSClientHeadersFromStruct(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "frontend with the Infos Subject Locality",
|
||||
frontend: &types.Frontend{
|
||||
PassTLSClientCert: &types.TLSClientHeaders{
|
||||
Infos: &types.TLSClientCertificateInfos{
|
||||
Subject: &types.TLSCLientCertificateDNInfos{
|
||||
Locality: true,
|
||||
},
|
||||
desc: "TLS client headers with the Infos Subject Locality",
|
||||
tlsClientHeaders: &types.TLSClientHeaders{
|
||||
Infos: &types.TLSClientCertificateInfos{
|
||||
Subject: &types.TLSCLientCertificateDNInfos{
|
||||
Locality: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -879,13 +817,11 @@ func TestNewTLSClientHeadersFromStruct(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "frontend with the Infos Subject CommonName",
|
||||
frontend: &types.Frontend{
|
||||
PassTLSClientCert: &types.TLSClientHeaders{
|
||||
Infos: &types.TLSClientCertificateInfos{
|
||||
Subject: &types.TLSCLientCertificateDNInfos{
|
||||
CommonName: true,
|
||||
},
|
||||
desc: "TLS client headers with the Infos Subject CommonName",
|
||||
tlsClientHeaders: &types.TLSClientHeaders{
|
||||
Infos: &types.TLSClientCertificateInfos{
|
||||
Subject: &types.TLSCLientCertificateDNInfos{
|
||||
CommonName: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -899,19 +835,17 @@ func TestNewTLSClientHeadersFromStruct(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "frontend with the Infos Issuer",
|
||||
frontend: &types.Frontend{
|
||||
PassTLSClientCert: &types.TLSClientHeaders{
|
||||
Infos: &types.TLSClientCertificateInfos{
|
||||
Issuer: &types.TLSCLientCertificateDNInfos{
|
||||
CommonName: true,
|
||||
Country: true,
|
||||
DomainComponent: true,
|
||||
Locality: true,
|
||||
Organization: true,
|
||||
SerialNumber: true,
|
||||
Province: true,
|
||||
},
|
||||
desc: "TLS client headers with the Infos Issuer",
|
||||
tlsClientHeaders: &types.TLSClientHeaders{
|
||||
Infos: &types.TLSClientCertificateInfos{
|
||||
Issuer: &types.TLSCLientCertificateDNInfos{
|
||||
CommonName: true,
|
||||
Country: true,
|
||||
DomainComponent: true,
|
||||
Locality: true,
|
||||
Organization: true,
|
||||
SerialNumber: true,
|
||||
Province: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -931,12 +865,10 @@ func TestNewTLSClientHeadersFromStruct(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "frontend with the Sans Infos",
|
||||
frontend: &types.Frontend{
|
||||
PassTLSClientCert: &types.TLSClientHeaders{
|
||||
Infos: &types.TLSClientCertificateInfos{
|
||||
Sans: true,
|
||||
},
|
||||
desc: "TLS client headers with the Sans Infos",
|
||||
tlsClientHeaders: &types.TLSClientHeaders{
|
||||
Infos: &types.TLSClientCertificateInfos{
|
||||
Sans: true,
|
||||
},
|
||||
},
|
||||
expected: &TLSClientHeaders{
|
||||
@@ -947,30 +879,28 @@ func TestNewTLSClientHeadersFromStruct(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "frontend with the Infos all",
|
||||
frontend: &types.Frontend{
|
||||
PassTLSClientCert: &types.TLSClientHeaders{
|
||||
Infos: &types.TLSClientCertificateInfos{
|
||||
NotAfter: true,
|
||||
NotBefore: true,
|
||||
Subject: &types.TLSCLientCertificateDNInfos{
|
||||
CommonName: true,
|
||||
Country: true,
|
||||
Locality: true,
|
||||
Organization: true,
|
||||
Province: true,
|
||||
SerialNumber: true,
|
||||
},
|
||||
Issuer: &types.TLSCLientCertificateDNInfos{
|
||||
Country: true,
|
||||
DomainComponent: true,
|
||||
Locality: true,
|
||||
Organization: true,
|
||||
SerialNumber: true,
|
||||
Province: true,
|
||||
},
|
||||
Sans: true,
|
||||
desc: "TLS client headers with the Infos all",
|
||||
tlsClientHeaders: &types.TLSClientHeaders{
|
||||
Infos: &types.TLSClientCertificateInfos{
|
||||
NotAfter: true,
|
||||
NotBefore: true,
|
||||
Subject: &types.TLSCLientCertificateDNInfos{
|
||||
CommonName: true,
|
||||
Country: true,
|
||||
Locality: true,
|
||||
Organization: true,
|
||||
Province: true,
|
||||
SerialNumber: true,
|
||||
},
|
||||
Issuer: &types.TLSCLientCertificateDNInfos{
|
||||
Country: true,
|
||||
DomainComponent: true,
|
||||
Locality: true,
|
||||
Organization: true,
|
||||
SerialNumber: true,
|
||||
Province: true,
|
||||
},
|
||||
Sans: true,
|
||||
},
|
||||
},
|
||||
expected: &TLSClientHeaders{
|
||||
@@ -1004,8 +934,7 @@ func TestNewTLSClientHeadersFromStruct(t *testing.T) {
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
require.Equal(t, test.expected, NewTLSClientHeaders(test.frontend))
|
||||
require.Equal(t, test.expected, NewTLSClientHeaders(test.tlsClientHeaders))
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
|
@@ -34,6 +34,7 @@ type Provider struct {
Stale bool `description:"Use stale consistency for catalog reads" export:"true"`
ExposedByDefault bool `description:"Expose Consul services by default" export:"true"`
Prefix string `description:"Prefix used for Consul catalog tags" export:"true"`
StrictChecks bool `description:"Keep a Consul node only if all checks status are passing" export:"true"`
FrontEndRule string `description:"Frontend rule used for Consul services" export:"true"`
TLS *types.ClientTLS `description:"Enable TLS support" export:"true"`
client *api.Client
@@ -301,6 +302,8 @@ func (p *Provider) watchHealthState(stopCh <-chan struct{}, watchCh chan<- map[s
_, failing := currentFailing[key]
if healthy.Status == "passing" && !failing {
current[key] = append(current[key], healthy.Node)
} else if !p.StrictChecks && healthy.Status == "warning" && !failing {
current[key] = append(current[key], healthy.Node)
} else if strings.HasPrefix(healthy.CheckID, "_service_maintenance") || strings.HasPrefix(healthy.CheckID, "_node_maintenance") {
maintenance = append(maintenance, healthy.CheckID)
} else {
@@ -489,7 +492,8 @@ func getServiceAddresses(services []*api.CatalogService) []string {

func (p *Provider) healthyNodes(service string) (catalogUpdate, error) {
health := p.client.Health()
data, _, err := health.Service(service, "", true, &api.QueryOptions{AllowStale: p.Stale})
// You can't filter with assigning passingOnly here, nodeFilter will do this later
data, _, err := health.Service(service, "", false, &api.QueryOptions{AllowStale: p.Stale})
if err != nil {
log.WithError(err).Errorf("Failed to fetch details of %s", service)
return catalogUpdate{}, err
@@ -533,7 +537,8 @@ func (p *Provider) nodeFilter(service string, node *api.ServiceEntry) bool {
log.Debugf("Service %v pruned by '%v' constraint", service, failingConstraint.String())
return false
}
return true

return p.hasPassingChecks(node)
}

func (p *Provider) isServiceEnabled(node *api.ServiceEntry) bool {
@@ -567,6 +572,11 @@ func (p *Provider) getConstraintTags(tags []string) []string {
return values
}

func (p *Provider) hasPassingChecks(node *api.ServiceEntry) bool {
status := node.Checks.AggregatedStatus()
return status == "passing" || !p.StrictChecks && status == "warning"
}

func (p *Provider) generateFrontends(service *serviceUpdate) []*serviceUpdate {
frontends := make([]*serviceUpdate, 0)
// to support <prefix>.frontend.xxx
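The provider keeps a node when its aggregated check status is "passing", or "warning" when StrictChecks is disabled. A hedged sketch of that rule in isolation (package and helper names are illustrative, not the provider's API), mainly to make the operator precedence explicit:

package catalog

// keepNode mirrors hasPassingChecks above: warnings only count as healthy
// when strict checking is turned off.
func keepNode(aggregatedStatus string, strictChecks bool) bool {
	return aggregatedStatus == "passing" || (!strictChecks && aggregatedStatus == "warning")
}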
@@ -28,7 +28,9 @@ import (
)

const (
// SwarmAPIVersion is a constant holding the version of the Provider API traefik will use
// DockerAPIVersion is a constant holding the version of the Provider API traefik will use
DockerAPIVersion = "1.24"
// SwarmAPIVersion is a constant holding the version of the Provider API traefik will use.
SwarmAPIVersion = "1.24"
)

@@ -110,11 +112,10 @@ func (p *Provider) createClient() (client.APIClient, error) {
"User-Agent": "Traefik " + version.Version,
}

var apiVersion string
apiVersion := DockerAPIVersion

if p.SwarmMode {
apiVersion = SwarmAPIVersion
} else {
apiVersion = DockerAPIVersion
}

return client.NewClient(p.Endpoint, apiVersion, httpClient, httpHeaders)
@@ -1,8 +0,0 @@
// +build !windows

package docker

const (
// DockerAPIVersion is a constant holding the version of the Provider API traefik will use
DockerAPIVersion = "1.21"
)
@@ -1,6 +0,0 @@
package docker

const (
// DockerAPIVersion is a constant holding the version of the Provider API traefik will use
DockerAPIVersion string = "1.24"
)
@@ -0,0 +1,13 @@
apiVersion: v1
kind: Endpoints
metadata:
  name: service1
  namespace: testing
subsets:
- addresses:
  - ip: 10.10.0.1
  ports:
  - port: 8080
    name: http
  - port: 1111
    name: foo
@@ -0,0 +1,8 @@
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  namespace: testing
spec:
  backend:
    serviceName: service1
    servicePort: 80
@@ -0,0 +1,14 @@
apiVersion: v1
kind: Service
metadata:
  name: service1
  namespace: testing
spec:
  clusterIP: 10.0.0.1
  ports:
  - name: http
    port: 80
    targetPort: 8080
  - name: foo
    port: 1111
    targetPort: 1111
@@ -508,6 +508,10 @@ func (p *Provider) addGlobalBackend(cl Client, i *extensionsv1beta1.Ingress, tem

for _, port := range service.Spec.Ports {

if !equalPorts(port, i.Spec.Backend.ServicePort) {
continue
}

// We have to treat external-name service differently here b/c it doesn't have any endpoints
if service.Spec.Type == corev1.ServiceTypeExternalName {
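The guard added above only keeps service ports that actually match the Ingress backend's servicePort, which may be given either as a number or as a name. A rough sketch of that comparison under assumed names (the real equalPorts implementation may differ):

package k8s

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// matchesServicePort reports whether a Service port corresponds to the
// port referenced by an Ingress backend (numeric or named reference).
func matchesServicePort(sp corev1.ServicePort, backendPort intstr.IntOrString) bool {
	if backendPort.Type == intstr.Int {
		return sp.Port == backendPort.IntVal
	}
	return sp.Name == backendPort.StrVal
}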
@@ -168,6 +168,33 @@ func TestProvider_loadIngresses(t *testing.T) {
|
||||
),
|
||||
),
|
||||
},
|
||||
{
|
||||
desc: "loadGlobalIngressWithMultiplePortNumbers",
|
||||
fixtures: []string{
|
||||
filepath.Join("fixtures", "loadGlobalIngressWithMultiplePortNumbers_ingresses.yml"),
|
||||
filepath.Join("fixtures", "loadGlobalIngressWithMultiplePortNumbers_services.yml"),
|
||||
filepath.Join("fixtures", "loadGlobalIngressWithMultiplePortNumbers_endpoints.yml"),
|
||||
},
|
||||
expected: buildConfiguration(
|
||||
backends(
|
||||
backend("global-default-backend",
|
||||
lbMethod("wrr"),
|
||||
servers(
|
||||
server("http://10.10.0.1:8080", weight(1)),
|
||||
),
|
||||
),
|
||||
),
|
||||
frontends(
|
||||
frontend("global-default-backend",
|
||||
frontendName("global-default-frontend"),
|
||||
passHostHeader(),
|
||||
routes(
|
||||
route("/", "PathPrefix:/"),
|
||||
),
|
||||
),
|
||||
),
|
||||
),
|
||||
},
|
||||
{
|
||||
desc: "loadGlobalIngressWithHttpsPortNames",
|
||||
fixtures: []string{
|
||||
|
@@ -113,7 +113,7 @@ func (s *Server) buildMiddlewares(frontendName string, frontend *types.Frontend,
}

// TLSClientHeaders
tlsClientHeadersMiddleware := middlewares.NewTLSClientHeaders(frontend)
tlsClientHeadersMiddleware := middlewares.NewTLSClientHeaders(frontend.PassTLSClientCert)
if tlsClientHeadersMiddleware != nil {
log.Debugf("Adding TLSClientHeaders middleware for frontend %s", frontendName)
@@ -85,7 +85,7 @@ func derCert(privKey *rsa.PrivateKey, expiration time.Time, domain string) ([]by
NotBefore: time.Now(),
NotAfter: expiration,

KeyUsage: x509.KeyUsageKeyEncipherment,
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageKeyAgreement | x509.KeyUsageDataEncipherment,
BasicConstraintsValid: true,
DNSNames: []string{domain},
}
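For context, a self-contained sketch of generating a self-signed certificate with the expanded key-usage set from the hunk above (package layout, helper name, and validity period are illustrative, not the Traefik implementation):

package tlssketch

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"math/big"
	"time"
)

// selfSignedDER builds a throwaway certificate whose KeyUsage matches the
// default-certificate change above.
func selfSignedDER(domain string) ([]byte, error) {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		return nil, err
	}
	tmpl := x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{CommonName: domain},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(365 * 24 * time.Hour),
		KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature |
			x509.KeyUsageKeyAgreement | x509.KeyUsageDataEncipherment,
		BasicConstraintsValid: true,
		DNSNames:              []string{domain},
	}
	return x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, &key.PublicKey, key)
}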
@@ -408,14 +408,14 @@ type Users []string

// Basic HTTP basic authentication
type Basic struct {
Users `json:"-" mapstructure:","`
Users `json:"-" mapstructure:"," dynamodbav:"users,omitempty"`
UsersFile string `json:"usersFile,omitempty"`
RemoveHeader bool `json:"removeHeader,omitempty"`
}

// Digest HTTP authentication
type Digest struct {
Users `json:"-" mapstructure:","`
Users `json:"-" mapstructure:"," dynamodbav:"users,omitempty"`
UsersFile string `json:"usersFile,omitempty"`
RemoveHeader bool `json:"removeHeader,omitempty"`
}
@@ -511,7 +511,7 @@ type ClientTLS struct {
CA string `description:"TLS CA" json:"ca,omitempty"`
CAOptional bool `description:"TLS CA.Optional" json:"caOptional,omitempty"`
Cert string `description:"TLS cert" json:"cert,omitempty"`
Key string `description:"TLS key" json:"-"`
Key string `description:"TLS key" json:"-" dynamodbav:"key,omitempty"`
InsecureSkipVerify bool `description:"TLS insecure skip verify" json:"insecureSkipVerify,omitempty"`
}
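The dynamodbav tags added above matter because the AWS SDK's dynamodbattribute marshaller honors dynamodbav tags and otherwise falls back to the json tag, so json:"-" would drop those fields from the stored item. A small usage sketch under that assumption, with a trimmed struct and illustrative values:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute"
)

type Basic struct {
	Users        []string `json:"-" dynamodbav:"users,omitempty"`
	UsersFile    string   `json:"usersFile,omitempty"`
	RemoveHeader bool     `json:"removeHeader,omitempty"`
}

func main() {
	item, err := dynamodbattribute.MarshalMap(Basic{Users: []string{"test:hash"}})
	if err != nil {
		panic(err)
	}
	// "users" is present only because dynamodbav overrides json:"-".
	fmt.Println(item["users"])
}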
vendor/contrib.go.opencensus.io/exporter/ocagent/LICENSE (generated, vendored, 201 lines)
@@ -0,0 +1,201 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
vendor/contrib.go.opencensus.io/exporter/ocagent/common.go (generated, vendored, 38 lines)
@@ -0,0 +1,38 @@
|
||||
// Copyright 2018, OpenCensus Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package ocagent
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"time"
|
||||
)
|
||||
|
||||
var randSrc = rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
|
||||
// retries function fn upto n times, if fn returns an error lest it returns nil early.
|
||||
// It applies exponential backoff in units of (1<<n) + jitter microsends.
|
||||
func nTriesWithExponentialBackoff(nTries int64, timeBaseUnit time.Duration, fn func() error) (err error) {
|
||||
for i := int64(0); i < nTries; i++ {
|
||||
err = fn()
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
// Backoff for a time period with a pseudo-random jitter
|
||||
jitter := time.Duration(randSrc.Float64()*100) * time.Microsecond
|
||||
ts := jitter + ((1 << uint64(i)) * timeBaseUnit)
|
||||
<-time.After(ts)
|
||||
}
|
||||
return err
|
||||
}
|
vendor/contrib.go.opencensus.io/exporter/ocagent/connection.go (generated, vendored, 97 lines)
@@ -0,0 +1,97 @@
|
||||
// Copyright 2018, OpenCensus Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package ocagent
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
sDisconnected int32 = 5 + iota
|
||||
sConnected
|
||||
)
|
||||
|
||||
func (ae *Exporter) setStateDisconnected() {
|
||||
atomic.StoreInt32(&ae.connectionState, sDisconnected)
|
||||
select {
|
||||
case ae.disconnectedCh <- true:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
func (ae *Exporter) setStateConnected() {
|
||||
atomic.StoreInt32(&ae.connectionState, sConnected)
|
||||
}
|
||||
|
||||
func (ae *Exporter) connected() bool {
|
||||
return atomic.LoadInt32(&ae.connectionState) == sConnected
|
||||
}
|
||||
|
||||
const defaultConnReattemptPeriod = 10 * time.Second
|
||||
|
||||
func (ae *Exporter) indefiniteBackgroundConnection() error {
|
||||
defer func() {
|
||||
ae.backgroundConnectionDoneCh <- true
|
||||
}()
|
||||
|
||||
connReattemptPeriod := ae.reconnectionPeriod
|
||||
if connReattemptPeriod <= 0 {
|
||||
connReattemptPeriod = defaultConnReattemptPeriod
|
||||
}
|
||||
|
||||
// No strong seeding required, nano time can
|
||||
// already help with pseudo uniqueness.
|
||||
rng := rand.New(rand.NewSource(time.Now().UnixNano() + rand.Int63n(1024)))
|
||||
|
||||
// maxJitter: 1 + (70% of the connectionReattemptPeriod)
|
||||
maxJitter := int64(1 + 0.7*float64(connReattemptPeriod))
|
||||
|
||||
for {
|
||||
// Otherwise these will be the normal scenarios to enable
|
||||
// reconnections if we trip out.
|
||||
// 1. If we've stopped, return entirely
|
||||
// 2. Otherwise block until we are disconnected, and
|
||||
// then retry connecting
|
||||
select {
|
||||
case <-ae.stopCh:
|
||||
return errStopped
|
||||
|
||||
case <-ae.disconnectedCh:
|
||||
// Normal scenario that we'll wait for
|
||||
}
|
||||
|
||||
if err := ae.connect(); err == nil {
|
||||
ae.setStateConnected()
|
||||
} else {
|
||||
ae.setStateDisconnected()
|
||||
}
|
||||
|
||||
// Apply some jitter to avoid lockstep retrials of other
|
||||
// agent-exporters. Lockstep retrials could result in an
|
||||
// innocent DDOS, by clogging the machine's resources and network.
|
||||
jitter := time.Duration(rng.Int63n(maxJitter))
|
||||
<-time.After(connReattemptPeriod + jitter)
|
||||
}
|
||||
}
|
||||
|
||||
func (ae *Exporter) connect() error {
|
||||
cc, err := ae.dialToAgent()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return ae.enableConnectionStreams(cc)
|
||||
}
|
vendor/contrib.go.opencensus.io/exporter/ocagent/nodeinfo.go (generated, vendored, 46 lines)
@@ -0,0 +1,46 @@
|
||||
// Copyright 2018, OpenCensus Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package ocagent
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
commonpb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1"
|
||||
"go.opencensus.io"
|
||||
)
|
||||
|
||||
// NodeWithStartTime creates a node using nodeName and derives:
|
||||
// Hostname from the environment
|
||||
// Pid from the current process
|
||||
// StartTimestamp from the start time of this process
|
||||
// Language and library information.
|
||||
func NodeWithStartTime(nodeName string) *commonpb.Node {
|
||||
return &commonpb.Node{
|
||||
Identifier: &commonpb.ProcessIdentifier{
|
||||
HostName: os.Getenv("HOSTNAME"),
|
||||
Pid: uint32(os.Getpid()),
|
||||
StartTimestamp: timeToTimestamp(startTime),
|
||||
},
|
||||
LibraryInfo: &commonpb.LibraryInfo{
|
||||
Language: commonpb.LibraryInfo_GO_LANG,
|
||||
ExporterVersion: Version,
|
||||
CoreLibraryVersion: opencensus.Version(),
|
||||
},
|
||||
ServiceInfo: &commonpb.ServiceInfo{
|
||||
Name: nodeName,
|
||||
},
|
||||
Attributes: make(map[string]string),
|
||||
}
|
||||
}
|
vendor/contrib.go.opencensus.io/exporter/ocagent/ocagent.go (generated, vendored, 496 lines)
@@ -0,0 +1,496 @@
|
||||
// Copyright 2018, OpenCensus Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package ocagent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"google.golang.org/api/support/bundler"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/metadata"
|
||||
|
||||
"go.opencensus.io/plugin/ocgrpc"
|
||||
"go.opencensus.io/resource"
|
||||
"go.opencensus.io/stats/view"
|
||||
"go.opencensus.io/trace"
|
||||
|
||||
commonpb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1"
|
||||
agentmetricspb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1"
|
||||
agenttracepb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1"
|
||||
metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1"
|
||||
resourcepb "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1"
|
||||
tracepb "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1"
|
||||
)
|
||||
|
||||
var startupMu sync.Mutex
|
||||
var startTime time.Time
|
||||
|
||||
func init() {
|
||||
startupMu.Lock()
|
||||
startTime = time.Now()
|
||||
startupMu.Unlock()
|
||||
}
|
||||
|
||||
var _ trace.Exporter = (*Exporter)(nil)
|
||||
var _ view.Exporter = (*Exporter)(nil)
|
||||
|
||||
type Exporter struct {
|
||||
connectionState int32
|
||||
|
||||
// mu protects the non-atomic and non-channel variables
|
||||
mu sync.RWMutex
|
||||
// senderMu protects the concurrent unsafe traceExporter client
|
||||
senderMu sync.RWMutex
|
||||
started bool
|
||||
stopped bool
|
||||
agentAddress string
|
||||
serviceName string
|
||||
canDialInsecure bool
|
||||
traceExporter agenttracepb.TraceService_ExportClient
|
||||
metricsExporter agentmetricspb.MetricsService_ExportClient
|
||||
nodeInfo *commonpb.Node
|
||||
grpcClientConn *grpc.ClientConn
|
||||
reconnectionPeriod time.Duration
|
||||
resource *resourcepb.Resource
|
||||
compressor string
|
||||
headers map[string]string
|
||||
|
||||
startOnce sync.Once
|
||||
stopCh chan bool
|
||||
disconnectedCh chan bool
|
||||
|
||||
backgroundConnectionDoneCh chan bool
|
||||
|
||||
traceBundler *bundler.Bundler
|
||||
|
||||
// viewDataBundler is the bundler to enable conversion
|
||||
// from OpenCensus-Go view.Data to metricspb.Metric.
|
||||
// Please do not confuse it with metricsBundler!
|
||||
viewDataBundler *bundler.Bundler
|
||||
|
||||
clientTransportCredentials credentials.TransportCredentials
|
||||
}
|
||||
|
||||
func NewExporter(opts ...ExporterOption) (*Exporter, error) {
|
||||
exp, err := NewUnstartedExporter(opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := exp.Start(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return exp, nil
|
||||
}
|
||||
|
||||
const spanDataBufferSize = 300
|
||||
|
||||
func NewUnstartedExporter(opts ...ExporterOption) (*Exporter, error) {
|
||||
e := new(Exporter)
|
||||
for _, opt := range opts {
|
||||
opt.withExporter(e)
|
||||
}
|
||||
traceBundler := bundler.NewBundler((*trace.SpanData)(nil), func(bundle interface{}) {
|
||||
e.uploadTraces(bundle.([]*trace.SpanData))
|
||||
})
|
||||
traceBundler.DelayThreshold = 2 * time.Second
|
||||
traceBundler.BundleCountThreshold = spanDataBufferSize
|
||||
e.traceBundler = traceBundler
|
||||
|
||||
viewDataBundler := bundler.NewBundler((*view.Data)(nil), func(bundle interface{}) {
|
||||
e.uploadViewData(bundle.([]*view.Data))
|
||||
})
|
||||
viewDataBundler.DelayThreshold = 2 * time.Second
|
||||
viewDataBundler.BundleCountThreshold = 500 // TODO: (@odeke-em) make this configurable.
|
||||
e.viewDataBundler = viewDataBundler
|
||||
e.nodeInfo = NodeWithStartTime(e.serviceName)
|
||||
e.resource = resourceProtoFromEnv()
|
||||
|
||||
return e, nil
|
||||
}
|
||||
|
||||
const (
|
||||
maxInitialConfigRetries = 10
|
||||
maxInitialTracesRetries = 10
|
||||
)
|
||||
|
||||
var (
|
||||
errAlreadyStarted = errors.New("already started")
|
||||
errNotStarted = errors.New("not started")
|
||||
errStopped = errors.New("stopped")
|
||||
errNoConnection = errors.New("no active connection")
|
||||
)
|
||||
|
||||
// Start dials to the agent, establishing a connection to it. It also
|
||||
// initiates the Config and Trace services by sending over the initial
|
||||
// messages that consist of the node identifier. Start invokes a background
|
||||
// connector that will reattempt connections to the agent periodically
|
||||
// if the connection dies.
|
||||
func (ae *Exporter) Start() error {
|
||||
var err = errAlreadyStarted
|
||||
ae.startOnce.Do(func() {
|
||||
ae.mu.Lock()
|
||||
defer ae.mu.Unlock()
|
||||
|
||||
ae.started = true
|
||||
ae.disconnectedCh = make(chan bool, 1)
|
||||
ae.stopCh = make(chan bool)
|
||||
ae.backgroundConnectionDoneCh = make(chan bool)
|
||||
|
||||
ae.setStateDisconnected()
|
||||
go ae.indefiniteBackgroundConnection()
|
||||
|
||||
err = nil
|
||||
})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (ae *Exporter) prepareAgentAddress() string {
|
||||
if ae.agentAddress != "" {
|
||||
return ae.agentAddress
|
||||
}
|
||||
return fmt.Sprintf("%s:%d", DefaultAgentHost, DefaultAgentPort)
|
||||
}
|
||||
|
||||
func (ae *Exporter) enableConnectionStreams(cc *grpc.ClientConn) error {
|
||||
ae.mu.RLock()
|
||||
started := ae.started
|
||||
nodeInfo := ae.nodeInfo
|
||||
ae.mu.RUnlock()
|
||||
|
||||
if !started {
|
||||
return errNotStarted
|
||||
}
|
||||
|
||||
ae.mu.Lock()
|
||||
// If the previous clientConn was non-nil, close it
|
||||
if ae.grpcClientConn != nil {
|
||||
_ = ae.grpcClientConn.Close()
|
||||
}
|
||||
ae.grpcClientConn = cc
|
||||
ae.mu.Unlock()
|
||||
|
||||
if err := ae.createTraceServiceConnection(ae.grpcClientConn, nodeInfo); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return ae.createMetricsServiceConnection(ae.grpcClientConn, nodeInfo)
|
||||
}
|
||||
|
||||
func (ae *Exporter) createTraceServiceConnection(cc *grpc.ClientConn, node *commonpb.Node) error {
|
||||
// Initiate the trace service by sending over node identifier info.
|
||||
traceSvcClient := agenttracepb.NewTraceServiceClient(cc)
|
||||
ctx := context.Background()
|
||||
if len(ae.headers) > 0 {
|
||||
ctx = metadata.NewOutgoingContext(ctx, metadata.New(ae.headers))
|
||||
}
|
||||
traceExporter, err := traceSvcClient.Export(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Exporter.Start:: TraceServiceClient: %v", err)
|
||||
}
|
||||
|
||||
firstTraceMessage := &agenttracepb.ExportTraceServiceRequest{
|
||||
Node: node,
|
||||
Resource: ae.resource,
|
||||
}
|
||||
if err := traceExporter.Send(firstTraceMessage); err != nil {
|
||||
return fmt.Errorf("Exporter.Start:: Failed to initiate the Config service: %v", err)
|
||||
}
|
||||
|
||||
ae.mu.Lock()
|
||||
ae.traceExporter = traceExporter
|
||||
ae.mu.Unlock()
|
||||
|
||||
// Initiate the config service by sending over node identifier info.
|
||||
configStream, err := traceSvcClient.Config(context.Background())
|
||||
if err != nil {
|
||||
return fmt.Errorf("Exporter.Start:: ConfigStream: %v", err)
|
||||
}
|
||||
firstCfgMessage := &agenttracepb.CurrentLibraryConfig{Node: node}
|
||||
if err := configStream.Send(firstCfgMessage); err != nil {
|
||||
return fmt.Errorf("Exporter.Start:: Failed to initiate the Config service: %v", err)
|
||||
}
|
||||
|
||||
// In the background, handle trace configurations that are beamed down
|
||||
// by the agent, but also reply to it with the applied configuration.
|
||||
go ae.handleConfigStreaming(configStream)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ae *Exporter) createMetricsServiceConnection(cc *grpc.ClientConn, node *commonpb.Node) error {
|
||||
metricsSvcClient := agentmetricspb.NewMetricsServiceClient(cc)
|
||||
metricsExporter, err := metricsSvcClient.Export(context.Background())
|
||||
if err != nil {
|
||||
return fmt.Errorf("MetricsExporter: failed to start the service client: %v", err)
|
||||
}
|
||||
// Initiate the metrics service by sending over the first message just containing the Node and Resource.
|
||||
firstMetricsMessage := &agentmetricspb.ExportMetricsServiceRequest{
|
||||
Node: node,
|
||||
Resource: ae.resource,
|
||||
}
|
||||
if err := metricsExporter.Send(firstMetricsMessage); err != nil {
|
||||
return fmt.Errorf("MetricsExporter:: failed to send the first message: %v", err)
|
||||
}
|
||||
|
||||
ae.mu.Lock()
|
||||
ae.metricsExporter = metricsExporter
|
||||
ae.mu.Unlock()
|
||||
|
||||
// With that we are good to go and can start sending metrics
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ae *Exporter) dialToAgent() (*grpc.ClientConn, error) {
|
||||
addr := ae.prepareAgentAddress()
|
||||
var dialOpts []grpc.DialOption
|
||||
if ae.clientTransportCredentials != nil {
|
||||
dialOpts = append(dialOpts, grpc.WithTransportCredentials(ae.clientTransportCredentials))
|
||||
} else if ae.canDialInsecure {
|
||||
dialOpts = append(dialOpts, grpc.WithInsecure())
|
||||
}
|
||||
if ae.compressor != "" {
|
||||
dialOpts = append(dialOpts, grpc.WithDefaultCallOptions(grpc.UseCompressor(ae.compressor)))
|
||||
}
|
||||
dialOpts = append(dialOpts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{}))
|
||||
|
||||
ctx := context.Background()
|
||||
if len(ae.headers) > 0 {
|
||||
ctx = metadata.NewOutgoingContext(ctx, metadata.New(ae.headers))
|
||||
}
|
||||
return grpc.DialContext(ctx, addr, dialOpts...)
|
||||
}
|
||||
|
||||
func (ae *Exporter) handleConfigStreaming(configStream agenttracepb.TraceService_ConfigClient) error {
|
||||
// Note: We haven't yet implemented configuration sending so we
|
||||
// should NOT be changing connection states within this function for now.
|
||||
for {
|
||||
recv, err := configStream.Recv()
|
||||
if err != nil {
|
||||
// TODO: Check if this is a transient error or exponential backoff-able.
|
||||
return err
|
||||
}
|
||||
cfg := recv.Config
|
||||
if cfg == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Otherwise now apply the trace configuration sent down from the agent
|
||||
if psamp := cfg.GetProbabilitySampler(); psamp != nil {
|
||||
trace.ApplyConfig(trace.Config{DefaultSampler: trace.ProbabilitySampler(psamp.SamplingProbability)})
|
||||
} else if csamp := cfg.GetConstantSampler(); csamp != nil {
|
||||
alwaysSample := csamp.Decision == tracepb.ConstantSampler_ALWAYS_ON
|
||||
if alwaysSample {
|
||||
trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
|
||||
} else {
|
||||
trace.ApplyConfig(trace.Config{DefaultSampler: trace.NeverSample()})
|
||||
}
|
||||
} else { // TODO: Add the rate limiting sampler here
|
||||
}
|
||||
|
||||
// Then finally send back to upstream the newly applied configuration
|
||||
err = configStream.Send(&agenttracepb.CurrentLibraryConfig{Config: &tracepb.TraceConfig{Sampler: cfg.Sampler}})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Stop shuts down all the connections and resources
|
||||
// related to the exporter.
|
||||
func (ae *Exporter) Stop() error {
|
||||
ae.mu.RLock()
|
||||
cc := ae.grpcClientConn
|
||||
started := ae.started
|
||||
stopped := ae.stopped
|
||||
ae.mu.RUnlock()
|
||||
|
||||
if !started {
|
||||
return errNotStarted
|
||||
}
|
||||
if stopped {
|
||||
// TODO: tell the user that we've already stopped, so perhaps a sentinel error?
|
||||
return nil
|
||||
}
|
||||
|
||||
ae.Flush()
|
||||
|
||||
// Now close the underlying gRPC connection.
|
||||
var err error
|
||||
if cc != nil {
|
||||
err = cc.Close()
|
||||
}
|
||||
|
||||
// At this point we can change the state variables: started and stopped
|
||||
ae.mu.Lock()
|
||||
ae.started = false
|
||||
ae.stopped = true
|
||||
ae.mu.Unlock()
|
||||
close(ae.stopCh)
|
||||
|
||||
// Ensure that the backgroundConnector returns
|
||||
<-ae.backgroundConnectionDoneCh
|
||||
|
||||
return err
|
||||
}
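// Illustrative lifecycle sketch (not part of the vendored file). It assumes a
// constructor such as ocagent.NewExporter exists in this package (it is not
// shown in this excerpt); treat the names below as assumptions, not the
// definitive API. Note that Stop already flushes pending data via ae.Flush().
//
//	exp, err := ocagent.NewExporter(ocagent.WithInsecure(), ocagent.WithServiceName("my-service"))
//	if err != nil {
//		log.Fatalf("failed to create ocagent exporter: %v", err)
//	}
//	trace.RegisterExporter(exp) // spans flow through ExportSpan
//	view.RegisterExporter(exp)  // view data flows through ExportView
//	defer exp.Stop()            // flushes, closes streams and the gRPC connection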
|
||||
|
||||
func (ae *Exporter) ExportSpan(sd *trace.SpanData) {
|
||||
if sd == nil {
|
||||
return
|
||||
}
|
||||
_ = ae.traceBundler.Add(sd, 1)
|
||||
}
|
||||
|
||||
func (ae *Exporter) ExportTraceServiceRequest(batch *agenttracepb.ExportTraceServiceRequest) error {
|
||||
if batch == nil || len(batch.Spans) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ae.stopCh:
|
||||
return errStopped
|
||||
|
||||
default:
|
||||
if !ae.connected() {
|
||||
return errNoConnection
|
||||
}
|
||||
|
||||
ae.senderMu.Lock()
|
||||
err := ae.traceExporter.Send(batch)
|
||||
ae.senderMu.Unlock()
|
||||
if err != nil {
|
||||
ae.setStateDisconnected()
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func (ae *Exporter) ExportView(vd *view.Data) {
|
||||
if vd == nil {
|
||||
return
|
||||
}
|
||||
_ = ae.viewDataBundler.Add(vd, 1)
|
||||
}
|
||||
|
||||
func ocSpanDataToPbSpans(sdl []*trace.SpanData) []*tracepb.Span {
|
||||
if len(sdl) == 0 {
|
||||
return nil
|
||||
}
|
||||
protoSpans := make([]*tracepb.Span, 0, len(sdl))
|
||||
for _, sd := range sdl {
|
||||
if sd != nil {
|
||||
protoSpans = append(protoSpans, ocSpanToProtoSpan(sd))
|
||||
}
|
||||
}
|
||||
return protoSpans
|
||||
}
|
||||
|
||||
func (ae *Exporter) uploadTraces(sdl []*trace.SpanData) {
|
||||
select {
|
||||
case <-ae.stopCh:
|
||||
return
|
||||
|
||||
default:
|
||||
if !ae.connected() {
|
||||
return
|
||||
}
|
||||
|
||||
protoSpans := ocSpanDataToPbSpans(sdl)
|
||||
if len(protoSpans) == 0 {
|
||||
return
|
||||
}
|
||||
ae.senderMu.Lock()
|
||||
err := ae.traceExporter.Send(&agenttracepb.ExportTraceServiceRequest{
|
||||
Spans: protoSpans,
|
||||
})
|
||||
ae.senderMu.Unlock()
|
||||
if err != nil {
|
||||
ae.setStateDisconnected()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func ocViewDataToPbMetrics(vdl []*view.Data) []*metricspb.Metric {
|
||||
if len(vdl) == 0 {
|
||||
return nil
|
||||
}
|
||||
metrics := make([]*metricspb.Metric, 0, len(vdl))
|
||||
for _, vd := range vdl {
|
||||
if vd != nil {
|
||||
vmetric, err := viewDataToMetric(vd)
|
||||
// TODO: (@odeke-em) somehow report this error, if it is non-nil.
|
||||
if err == nil && vmetric != nil {
|
||||
metrics = append(metrics, vmetric)
|
||||
}
|
||||
}
|
||||
}
|
||||
return metrics
|
||||
}
|
||||
|
||||
func (ae *Exporter) uploadViewData(vdl []*view.Data) {
|
||||
select {
|
||||
case <-ae.stopCh:
|
||||
return
|
||||
|
||||
default:
|
||||
if !ae.connected() {
|
||||
return
|
||||
}
|
||||
|
||||
protoMetrics := ocViewDataToPbMetrics(vdl)
|
||||
if len(protoMetrics) == 0 {
|
||||
return
|
||||
}
|
||||
err := ae.metricsExporter.Send(&agentmetricspb.ExportMetricsServiceRequest{
|
||||
Metrics: protoMetrics,
|
||||
// TODO:(@odeke-em)
|
||||
// a) Figure out how to derive a Node from the environment
|
||||
// b) Figure out how to derive a Resource from the environment
|
||||
// or better letting users of the exporter configure it.
|
||||
})
|
||||
if err != nil {
|
||||
ae.setStateDisconnected()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (ae *Exporter) Flush() {
|
||||
ae.traceBundler.Flush()
|
||||
ae.viewDataBundler.Flush()
|
||||
}
|
||||
|
||||
func resourceProtoFromEnv() *resourcepb.Resource {
|
||||
rs, _ := resource.FromEnv(context.Background())
|
||||
if rs == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
rprs := &resourcepb.Resource{
|
||||
Type: rs.Type,
|
||||
}
|
||||
if rs.Labels != nil {
|
||||
rprs.Labels = make(map[string]string)
|
||||
for k, v := range rs.Labels {
|
||||
rprs.Labels[k] = v
|
||||
}
|
||||
}
|
||||
return rprs
|
||||
}
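// Illustrative note (not part of the vendored file): resource.FromEnv above is
// populated from environment variables; to the best of my knowledge the
// go.opencensus.io resource package reads OC_RESOURCE_TYPE and
// OC_RESOURCE_LABELS, e.g.
//
//	OC_RESOURCE_TYPE="container" \
//	OC_RESOURCE_LABELS="k8s.io/namespace=default,k8s.io/pod=web-0" \
//	./my-service
//
// The variable names are an assumption based on that package, not something
// stated in this diff.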
|
128
vendor/contrib.go.opencensus.io/exporter/ocagent/options.go
generated
vendored
Normal file
@@ -0,0 +1,128 @@
|
||||
// Copyright 2018, OpenCensus Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package ocagent
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/credentials"
|
||||
)
|
||||
|
||||
const (
|
||||
DefaultAgentPort uint16 = 55678
|
||||
DefaultAgentHost string = "localhost"
|
||||
)
|
||||
|
||||
type ExporterOption interface {
|
||||
withExporter(e *Exporter)
|
||||
}
|
||||
|
||||
type insecureGrpcConnection int
|
||||
|
||||
var _ ExporterOption = (*insecureGrpcConnection)(nil)
|
||||
|
||||
func (igc *insecureGrpcConnection) withExporter(e *Exporter) {
|
||||
e.canDialInsecure = true
|
||||
}
|
||||
|
||||
// WithInsecure disables client transport security for the exporter's gRPC connection
|
||||
// just like grpc.WithInsecure() https://godoc.org/google.golang.org/grpc#WithInsecure
|
||||
// does. Note, by default, client security is required unless WithInsecure is used.
|
||||
func WithInsecure() ExporterOption { return new(insecureGrpcConnection) }
|
||||
|
||||
type addressSetter string
|
||||
|
||||
func (as addressSetter) withExporter(e *Exporter) {
|
||||
e.agentAddress = string(as)
|
||||
}
|
||||
|
||||
var _ ExporterOption = (*addressSetter)(nil)
|
||||
|
||||
// WithAddress allows one to set the address that the exporter will
|
||||
// connect to the agent on. If unset, it will instead try to
// connect to DefaultAgentHost:DefaultAgentPort.
|
||||
func WithAddress(addr string) ExporterOption {
|
||||
return addressSetter(addr)
|
||||
}
|
||||
|
||||
type serviceNameSetter string
|
||||
|
||||
func (sns serviceNameSetter) withExporter(e *Exporter) {
|
||||
e.serviceName = string(sns)
|
||||
}
|
||||
|
||||
var _ ExporterOption = (*serviceNameSetter)(nil)
|
||||
|
||||
// WithServiceName allows one to set/override the service name
|
||||
// that the exporter will report to the agent.
|
||||
func WithServiceName(serviceName string) ExporterOption {
|
||||
return serviceNameSetter(serviceName)
|
||||
}
|
||||
|
||||
type reconnectionPeriod time.Duration
|
||||
|
||||
func (rp reconnectionPeriod) withExporter(e *Exporter) {
|
||||
e.reconnectionPeriod = time.Duration(rp)
|
||||
}
|
||||
|
||||
func WithReconnectionPeriod(rp time.Duration) ExporterOption {
|
||||
return reconnectionPeriod(rp)
|
||||
}
|
||||
|
||||
type compressorSetter string
|
||||
|
||||
func (c compressorSetter) withExporter(e *Exporter) {
|
||||
e.compressor = string(c)
|
||||
}
|
||||
|
||||
// UseCompressor will set the compressor for the gRPC client to use when sending requests.
|
||||
// It is the responsibility of the caller to ensure that the compressor set has been registered
|
||||
// with google.golang.org/grpc/encoding. This can be done by encoding.RegisterCompressor. Some
|
||||
// compressors auto-register on import, such as gzip, which can be registered by calling
|
||||
// `import _ "google.golang.org/grpc/encoding/gzip"`
|
||||
func UseCompressor(compressorName string) ExporterOption {
|
||||
return compressorSetter(compressorName)
|
||||
}
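// Illustrative sketch (not part of the vendored file): enabling gzip
// compression as the doc comment above describes. Importing the gzip package
// registers it with google.golang.org/grpc/encoding via its init function.
//
//	import "google.golang.org/grpc/encoding/gzip" // importing registers the compressor
//
//	opt := ocagent.UseCompressor(gzip.Name) // gzip.Name == "gzip"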
|
||||
|
||||
type headerSetter map[string]string
|
||||
|
||||
func (h headerSetter) withExporter(e *Exporter) {
|
||||
e.headers = map[string]string(h)
|
||||
}
|
||||
|
||||
// WithHeaders will send the provided headers when the gRPC stream connection
|
||||
// is instantiated
|
||||
func WithHeaders(headers map[string]string) ExporterOption {
|
||||
return headerSetter(headers)
|
||||
}
|
||||
|
||||
type clientCredentials struct {
|
||||
credentials.TransportCredentials
|
||||
}
|
||||
|
||||
var _ ExporterOption = (*clientCredentials)(nil)
|
||||
|
||||
// WithTLSCredentials allows the connection to use TLS credentials
|
||||
// when talking to the server. It takes in grpc.TransportCredentials instead
|
||||
// of say a Certificate file or a tls.Certificate, because retrieving
|
||||
// these credentials can be done in many ways e.g. plain file, in code tls.Config
|
||||
// or by certificate rotation, so it is up to the caller to decide what to use.
|
||||
func WithTLSCredentials(creds credentials.TransportCredentials) ExporterOption {
|
||||
return &clientCredentials{TransportCredentials: creds}
|
||||
}
|
||||
|
||||
func (cc *clientCredentials) withExporter(e *Exporter) {
|
||||
e.clientTransportCredentials = cc.TransportCredentials
|
||||
}
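// Illustrative sketch (not part of the vendored file): combining the options
// defined in this file, including TLS credentials loaded from a file. The
// NewExporter constructor, the file path, and the agent address are
// assumptions for illustration only.
//
//	creds, err := credentials.NewClientTLSFromFile("/etc/ssl/agent-ca.pem", "")
//	if err != nil {
//		log.Fatal(err)
//	}
//	exp, err := ocagent.NewExporter(
//		ocagent.WithAddress("oc-agent.example.com:55678"),
//		ocagent.WithServiceName("traefik"),
//		ocagent.WithReconnectionPeriod(5*time.Second),
//		ocagent.WithHeaders(map[string]string{"authorization": "Bearer <token>"}),
//		ocagent.WithTLSCredentials(creds),
//	)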
|
248
vendor/contrib.go.opencensus.io/exporter/ocagent/transform_spans.go
generated
vendored
Normal file
@@ -0,0 +1,248 @@
|
||||
// Copyright 2018, OpenCensus Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package ocagent
|
||||
|
||||
import (
|
||||
"math"
|
||||
"time"
|
||||
|
||||
"go.opencensus.io/trace"
|
||||
"go.opencensus.io/trace/tracestate"
|
||||
|
||||
tracepb "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1"
|
||||
"github.com/golang/protobuf/ptypes/timestamp"
|
||||
)
|
||||
|
||||
const (
|
||||
maxAnnotationEventsPerSpan = 32
|
||||
maxMessageEventsPerSpan = 128
|
||||
)
|
||||
|
||||
func ocSpanToProtoSpan(sd *trace.SpanData) *tracepb.Span {
|
||||
if sd == nil {
|
||||
return nil
|
||||
}
|
||||
var namePtr *tracepb.TruncatableString
|
||||
if sd.Name != "" {
|
||||
namePtr = &tracepb.TruncatableString{Value: sd.Name}
|
||||
}
|
||||
return &tracepb.Span{
|
||||
TraceId: sd.TraceID[:],
|
||||
SpanId: sd.SpanID[:],
|
||||
ParentSpanId: sd.ParentSpanID[:],
|
||||
Status: ocStatusToProtoStatus(sd.Status),
|
||||
StartTime: timeToTimestamp(sd.StartTime),
|
||||
EndTime: timeToTimestamp(sd.EndTime),
|
||||
Links: ocLinksToProtoLinks(sd.Links),
|
||||
Kind: ocSpanKindToProtoSpanKind(sd.SpanKind),
|
||||
Name: namePtr,
|
||||
Attributes: ocAttributesToProtoAttributes(sd.Attributes),
|
||||
TimeEvents: ocTimeEventsToProtoTimeEvents(sd.Annotations, sd.MessageEvents),
|
||||
Tracestate: ocTracestateToProtoTracestate(sd.Tracestate),
|
||||
}
|
||||
}
|
||||
|
||||
var blankStatus trace.Status
|
||||
|
||||
func ocStatusToProtoStatus(status trace.Status) *tracepb.Status {
|
||||
if status == blankStatus {
|
||||
return nil
|
||||
}
|
||||
return &tracepb.Status{
|
||||
Code: status.Code,
|
||||
Message: status.Message,
|
||||
}
|
||||
}
|
||||
|
||||
func ocLinksToProtoLinks(links []trace.Link) *tracepb.Span_Links {
|
||||
if len(links) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
sl := make([]*tracepb.Span_Link, 0, len(links))
|
||||
for _, ocLink := range links {
|
||||
// This redefinition is necessary to prevent ocLink.*ID[:] copies
|
||||
// being reused -- in short we need a new ocLink per iteration.
|
||||
ocLink := ocLink
|
||||
|
||||
sl = append(sl, &tracepb.Span_Link{
|
||||
TraceId: ocLink.TraceID[:],
|
||||
SpanId: ocLink.SpanID[:],
|
||||
Type: ocLinkTypeToProtoLinkType(ocLink.Type),
|
||||
})
|
||||
}
|
||||
|
||||
return &tracepb.Span_Links{
|
||||
Link: sl,
|
||||
}
|
||||
}
|
||||
|
||||
func ocLinkTypeToProtoLinkType(oct trace.LinkType) tracepb.Span_Link_Type {
|
||||
switch oct {
|
||||
case trace.LinkTypeChild:
|
||||
return tracepb.Span_Link_CHILD_LINKED_SPAN
|
||||
case trace.LinkTypeParent:
|
||||
return tracepb.Span_Link_PARENT_LINKED_SPAN
|
||||
default:
|
||||
return tracepb.Span_Link_TYPE_UNSPECIFIED
|
||||
}
|
||||
}
|
||||
|
||||
func ocAttributesToProtoAttributes(attrs map[string]interface{}) *tracepb.Span_Attributes {
|
||||
if len(attrs) == 0 {
|
||||
return nil
|
||||
}
|
||||
outMap := make(map[string]*tracepb.AttributeValue)
|
||||
for k, v := range attrs {
|
||||
switch v := v.(type) {
|
||||
case bool:
|
||||
outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_BoolValue{BoolValue: v}}
|
||||
|
||||
case int:
|
||||
outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_IntValue{IntValue: int64(v)}}
|
||||
|
||||
case int64:
|
||||
outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_IntValue{IntValue: v}}
|
||||
|
||||
case string:
|
||||
outMap[k] = &tracepb.AttributeValue{
|
||||
Value: &tracepb.AttributeValue_StringValue{
|
||||
StringValue: &tracepb.TruncatableString{Value: v},
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
return &tracepb.Span_Attributes{
|
||||
AttributeMap: outMap,
|
||||
}
|
||||
}
|
||||
|
||||
// This code is mostly copied from
|
||||
// https://github.com/census-ecosystem/opencensus-go-exporter-stackdriver/blob/master/trace_proto.go#L46
|
||||
func ocTimeEventsToProtoTimeEvents(as []trace.Annotation, es []trace.MessageEvent) *tracepb.Span_TimeEvents {
|
||||
if len(as) == 0 && len(es) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
timeEvents := &tracepb.Span_TimeEvents{}
|
||||
var annotations, droppedAnnotationsCount int
|
||||
var messageEvents, droppedMessageEventsCount int
|
||||
|
||||
// Transform annotations
|
||||
for i, a := range as {
|
||||
if annotations >= maxAnnotationEventsPerSpan {
|
||||
droppedAnnotationsCount = len(as) - i
|
||||
break
|
||||
}
|
||||
annotations++
|
||||
timeEvents.TimeEvent = append(timeEvents.TimeEvent,
|
||||
&tracepb.Span_TimeEvent{
|
||||
Time: timeToTimestamp(a.Time),
|
||||
Value: transformAnnotationToTimeEvent(&a),
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
// Transform message events
|
||||
for i, e := range es {
|
||||
if messageEvents >= maxMessageEventsPerSpan {
|
||||
droppedMessageEventsCount = len(es) - i
|
||||
break
|
||||
}
|
||||
messageEvents++
|
||||
timeEvents.TimeEvent = append(timeEvents.TimeEvent,
|
||||
&tracepb.Span_TimeEvent{
|
||||
Time: timeToTimestamp(e.Time),
|
||||
Value: transformMessageEventToTimeEvent(&e),
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
// Process dropped counter
|
||||
timeEvents.DroppedAnnotationsCount = clip32(droppedAnnotationsCount)
|
||||
timeEvents.DroppedMessageEventsCount = clip32(droppedMessageEventsCount)
|
||||
|
||||
return timeEvents
|
||||
}
|
||||
|
||||
func transformAnnotationToTimeEvent(a *trace.Annotation) *tracepb.Span_TimeEvent_Annotation_ {
|
||||
return &tracepb.Span_TimeEvent_Annotation_{
|
||||
Annotation: &tracepb.Span_TimeEvent_Annotation{
|
||||
Description: &tracepb.TruncatableString{Value: a.Message},
|
||||
Attributes: ocAttributesToProtoAttributes(a.Attributes),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func transformMessageEventToTimeEvent(e *trace.MessageEvent) *tracepb.Span_TimeEvent_MessageEvent_ {
|
||||
return &tracepb.Span_TimeEvent_MessageEvent_{
|
||||
MessageEvent: &tracepb.Span_TimeEvent_MessageEvent{
|
||||
Type: tracepb.Span_TimeEvent_MessageEvent_Type(e.EventType),
|
||||
Id: uint64(e.MessageID),
|
||||
UncompressedSize: uint64(e.UncompressedByteSize),
|
||||
CompressedSize: uint64(e.CompressedByteSize),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// clip32 clips an int to the range of an int32.
|
||||
func clip32(x int) int32 {
|
||||
if x < math.MinInt32 {
|
||||
return math.MinInt32
|
||||
}
|
||||
if x > math.MaxInt32 {
|
||||
return math.MaxInt32
|
||||
}
|
||||
return int32(x)
|
||||
}
|
||||
|
||||
func timeToTimestamp(t time.Time) *timestamp.Timestamp {
|
||||
nanoTime := t.UnixNano()
|
||||
return &timestamp.Timestamp{
|
||||
Seconds: nanoTime / 1e9,
|
||||
Nanos: int32(nanoTime % 1e9),
|
||||
}
|
||||
}
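// Worked example (illustrative): for a t with UnixNano() == 1_565_000_000_123_456_789,
// the integer division and modulo above yield Seconds == 1_565_000_000 and
// Nanos == 123_456_789.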
|
||||
|
||||
func ocSpanKindToProtoSpanKind(kind int) tracepb.Span_SpanKind {
|
||||
switch kind {
|
||||
case trace.SpanKindClient:
|
||||
return tracepb.Span_CLIENT
|
||||
case trace.SpanKindServer:
|
||||
return tracepb.Span_SERVER
|
||||
default:
|
||||
return tracepb.Span_SPAN_KIND_UNSPECIFIED
|
||||
}
|
||||
}
|
||||
|
||||
func ocTracestateToProtoTracestate(ts *tracestate.Tracestate) *tracepb.Span_Tracestate {
|
||||
if ts == nil {
|
||||
return nil
|
||||
}
|
||||
return &tracepb.Span_Tracestate{
|
||||
Entries: ocTracestateEntriesToProtoTracestateEntries(ts.Entries()),
|
||||
}
|
||||
}
|
||||
|
||||
func ocTracestateEntriesToProtoTracestateEntries(entries []tracestate.Entry) []*tracepb.Span_Tracestate_Entry {
|
||||
protoEntries := make([]*tracepb.Span_Tracestate_Entry, 0, len(entries))
|
||||
for _, entry := range entries {
|
||||
protoEntries = append(protoEntries, &tracepb.Span_Tracestate_Entry{
|
||||
Key: entry.Key,
|
||||
Value: entry.Value,
|
||||
})
|
||||
}
|
||||
return protoEntries
|
||||
}
|
274
vendor/contrib.go.opencensus.io/exporter/ocagent/transform_stats_to_metrics.go
generated
vendored
Normal file
@@ -0,0 +1,274 @@
|
||||
// Copyright 2018, OpenCensus Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package ocagent
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"go.opencensus.io/stats"
|
||||
"go.opencensus.io/stats/view"
|
||||
"go.opencensus.io/tag"
|
||||
|
||||
"github.com/golang/protobuf/ptypes/timestamp"
|
||||
|
||||
metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1"
|
||||
)
|
||||
|
||||
var (
|
||||
errNilMeasure = errors.New("expecting a non-nil stats.Measure")
|
||||
errNilView = errors.New("expecting a non-nil view.View")
|
||||
errNilViewData = errors.New("expecting a non-nil view.Data")
|
||||
)
|
||||
|
||||
func viewDataToMetric(vd *view.Data) (*metricspb.Metric, error) {
|
||||
if vd == nil {
|
||||
return nil, errNilViewData
|
||||
}
|
||||
|
||||
descriptor, err := viewToMetricDescriptor(vd.View)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
timeseries, err := viewDataToTimeseries(vd)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
metric := &metricspb.Metric{
|
||||
MetricDescriptor: descriptor,
|
||||
Timeseries: timeseries,
|
||||
}
|
||||
return metric, nil
|
||||
}
|
||||
|
||||
func viewToMetricDescriptor(v *view.View) (*metricspb.MetricDescriptor, error) {
|
||||
if v == nil {
|
||||
return nil, errNilView
|
||||
}
|
||||
if v.Measure == nil {
|
||||
return nil, errNilMeasure
|
||||
}
|
||||
|
||||
desc := &metricspb.MetricDescriptor{
|
||||
Name: stringOrCall(v.Name, v.Measure.Name),
|
||||
Description: stringOrCall(v.Description, v.Measure.Description),
|
||||
Unit: v.Measure.Unit(),
|
||||
Type: aggregationToMetricDescriptorType(v),
|
||||
LabelKeys: tagKeysToLabelKeys(v.TagKeys),
|
||||
}
|
||||
return desc, nil
|
||||
}
|
||||
|
||||
func stringOrCall(first string, call func() string) string {
|
||||
if first != "" {
|
||||
return first
|
||||
}
|
||||
return call()
|
||||
}
|
||||
|
||||
type measureType uint
|
||||
|
||||
const (
|
||||
measureUnknown measureType = iota
|
||||
measureInt64
|
||||
measureFloat64
|
||||
)
|
||||
|
||||
func measureTypeFromMeasure(m stats.Measure) measureType {
|
||||
switch m.(type) {
|
||||
default:
|
||||
return measureUnknown
|
||||
case *stats.Float64Measure:
|
||||
return measureFloat64
|
||||
case *stats.Int64Measure:
|
||||
return measureInt64
|
||||
}
|
||||
}
|
||||
|
||||
func aggregationToMetricDescriptorType(v *view.View) metricspb.MetricDescriptor_Type {
|
||||
if v == nil || v.Aggregation == nil {
|
||||
return metricspb.MetricDescriptor_UNSPECIFIED
|
||||
}
|
||||
if v.Measure == nil {
|
||||
return metricspb.MetricDescriptor_UNSPECIFIED
|
||||
}
|
||||
|
||||
switch v.Aggregation.Type {
|
||||
case view.AggTypeCount:
|
||||
// Cumulative on int64
|
||||
return metricspb.MetricDescriptor_CUMULATIVE_INT64
|
||||
|
||||
case view.AggTypeDistribution:
|
||||
// Cumulative types
|
||||
return metricspb.MetricDescriptor_CUMULATIVE_DISTRIBUTION
|
||||
|
||||
case view.AggTypeLastValue:
|
||||
// Gauge types
|
||||
switch measureTypeFromMeasure(v.Measure) {
|
||||
case measureFloat64:
|
||||
return metricspb.MetricDescriptor_GAUGE_DOUBLE
|
||||
case measureInt64:
|
||||
return metricspb.MetricDescriptor_GAUGE_INT64
|
||||
}
|
||||
|
||||
case view.AggTypeSum:
|
||||
// Cumulative types
|
||||
switch measureTypeFromMeasure(v.Measure) {
|
||||
case measureFloat64:
|
||||
return metricspb.MetricDescriptor_CUMULATIVE_DOUBLE
|
||||
case measureInt64:
|
||||
return metricspb.MetricDescriptor_CUMULATIVE_INT64
|
||||
}
|
||||
}
|
||||
|
||||
// For all other cases, return unspecified.
|
||||
return metricspb.MetricDescriptor_UNSPECIFIED
|
||||
}
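// Summary of the mapping implemented above (derived directly from the switch):
//
//	view.AggTypeCount                        -> CUMULATIVE_INT64
//	view.AggTypeDistribution                 -> CUMULATIVE_DISTRIBUTION
//	view.AggTypeLastValue + Int64Measure     -> GAUGE_INT64
//	view.AggTypeLastValue + Float64Measure   -> GAUGE_DOUBLE
//	view.AggTypeSum + Int64Measure           -> CUMULATIVE_INT64
//	view.AggTypeSum + Float64Measure         -> CUMULATIVE_DOUBLE
//	anything else                            -> UNSPECIFIED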
|
||||
|
||||
func tagKeysToLabelKeys(tagKeys []tag.Key) []*metricspb.LabelKey {
|
||||
labelKeys := make([]*metricspb.LabelKey, 0, len(tagKeys))
|
||||
for _, tagKey := range tagKeys {
|
||||
labelKeys = append(labelKeys, &metricspb.LabelKey{
|
||||
Key: tagKey.Name(),
|
||||
})
|
||||
}
|
||||
return labelKeys
|
||||
}
|
||||
|
||||
func viewDataToTimeseries(vd *view.Data) ([]*metricspb.TimeSeries, error) {
|
||||
if vd == nil || len(vd.Rows) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Given that view.Data only contains Start, End
|
||||
// the timestamps for all the row data will be the exact same
|
||||
// per aggregation. However, the values will differ.
|
||||
// Each row has its own tags.
|
||||
startTimestamp := timeToProtoTimestamp(vd.Start)
|
||||
endTimestamp := timeToProtoTimestamp(vd.End)
|
||||
|
||||
mType := measureTypeFromMeasure(vd.View.Measure)
|
||||
timeseries := make([]*metricspb.TimeSeries, 0, len(vd.Rows))
|
||||
// It is imperative that the ordering of "LabelValues" matches those
|
||||
// of the Label keys in the metric descriptor.
|
||||
for _, row := range vd.Rows {
|
||||
labelValues := labelValuesFromTags(row.Tags)
|
||||
point := rowToPoint(vd.View, row, endTimestamp, mType)
|
||||
timeseries = append(timeseries, &metricspb.TimeSeries{
|
||||
StartTimestamp: startTimestamp,
|
||||
LabelValues: labelValues,
|
||||
Points: []*metricspb.Point{point},
|
||||
})
|
||||
}
|
||||
|
||||
if len(timeseries) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return timeseries, nil
|
||||
}
|
||||
|
||||
func timeToProtoTimestamp(t time.Time) *timestamp.Timestamp {
|
||||
unixNano := t.UnixNano()
|
||||
return &timestamp.Timestamp{
|
||||
Seconds: int64(unixNano / 1e9),
|
||||
Nanos: int32(unixNano % 1e9),
|
||||
}
|
||||
}
|
||||
|
||||
func rowToPoint(v *view.View, row *view.Row, endTimestamp *timestamp.Timestamp, mType measureType) *metricspb.Point {
|
||||
pt := &metricspb.Point{
|
||||
Timestamp: endTimestamp,
|
||||
}
|
||||
|
||||
switch data := row.Data.(type) {
|
||||
case *view.CountData:
|
||||
pt.Value = &metricspb.Point_Int64Value{Int64Value: data.Value}
|
||||
|
||||
case *view.DistributionData:
|
||||
pt.Value = &metricspb.Point_DistributionValue{
|
||||
DistributionValue: &metricspb.DistributionValue{
|
||||
Count: data.Count,
|
||||
Sum: float64(data.Count) * data.Mean, // because Mean := Sum/Count
|
||||
// TODO: Add Exemplar
|
||||
Buckets: bucketsToProtoBuckets(data.CountPerBucket),
|
||||
BucketOptions: &metricspb.DistributionValue_BucketOptions{
|
||||
Type: &metricspb.DistributionValue_BucketOptions_Explicit_{
|
||||
Explicit: &metricspb.DistributionValue_BucketOptions_Explicit{
|
||||
Bounds: v.Aggregation.Buckets,
|
||||
},
|
||||
},
|
||||
},
|
||||
SumOfSquaredDeviation: data.SumOfSquaredDev,
|
||||
}}
|
||||
|
||||
case *view.LastValueData:
|
||||
setPointValue(pt, data.Value, mType)
|
||||
|
||||
case *view.SumData:
|
||||
setPointValue(pt, data.Value, mType)
|
||||
}
|
||||
|
||||
return pt
|
||||
}
|
||||
|
||||
// Not returning anything from this function because metricspb.Point.is_Value is an unexported
|
||||
// interface, hence we just have to set its value by pointer.
|
||||
func setPointValue(pt *metricspb.Point, value float64, mType measureType) {
|
||||
if mType == measureInt64 {
|
||||
pt.Value = &metricspb.Point_Int64Value{Int64Value: int64(value)}
|
||||
} else {
|
||||
pt.Value = &metricspb.Point_DoubleValue{DoubleValue: value}
|
||||
}
|
||||
}
|
||||
|
||||
func bucketsToProtoBuckets(countPerBucket []int64) []*metricspb.DistributionValue_Bucket {
|
||||
distBuckets := make([]*metricspb.DistributionValue_Bucket, len(countPerBucket))
|
||||
for i := 0; i < len(countPerBucket); i++ {
|
||||
count := countPerBucket[i]
|
||||
|
||||
distBuckets[i] = &metricspb.DistributionValue_Bucket{
|
||||
Count: count,
|
||||
}
|
||||
}
|
||||
|
||||
return distBuckets
|
||||
}
|
||||
|
||||
func labelValuesFromTags(tags []tag.Tag) []*metricspb.LabelValue {
|
||||
if len(tags) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
labelValues := make([]*metricspb.LabelValue, 0, len(tags))
|
||||
for _, tag_ := range tags {
|
||||
labelValues = append(labelValues, &metricspb.LabelValue{
|
||||
Value: tag_.Value,
|
||||
|
||||
// It is imperative that we set the "HasValue" attribute,
|
||||
// in order to distinguish missing a label from the empty string.
|
||||
// https://godoc.org/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1#LabelValue.HasValue
|
||||
//
|
||||
// OpenCensus-Go uses non-pointers for tags as seen by this function's arguments,
|
||||
// so the best check we can use to distinguish missing labels/tags from the
|
||||
// empty string is by checking if the Tag.Key.Name() != "" to indicate that we have
|
||||
// a value.
|
||||
HasValue: tag_.Key.Name() != "",
|
||||
})
|
||||
}
|
||||
return labelValues
|
||||
}
|
17
vendor/contrib.go.opencensus.io/exporter/ocagent/version.go
generated
vendored
Normal file
@@ -0,0 +1,17 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package ocagent

const Version = "0.0.1"
340
vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2017-09-01/dns/models.go
generated
vendored
@@ -18,13 +18,18 @@ package dns
|
||||
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
"github.com/Azure/go-autorest/autorest/azure"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
"github.com/Azure/go-autorest/tracing"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// The package's fully qualified name.
|
||||
const fqdn = "github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2017-09-01/dns"
|
||||
|
||||
// RecordType enumerates the values for record type.
|
||||
type RecordType string
|
||||
|
||||
@@ -68,6 +73,18 @@ type ARecord struct {
|
||||
Ipv4Address *string `json:"ipv4Address,omitempty"`
|
||||
}
|
||||
|
||||
// AzureEntityResource the resource model definition for an Azure Resource Manager resource with an etag.
|
||||
type AzureEntityResource struct {
|
||||
// Etag - READ-ONLY; Resource Etag.
|
||||
Etag *string `json:"etag,omitempty"`
|
||||
// ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
|
||||
ID *string `json:"id,omitempty"`
|
||||
// Name - READ-ONLY; The name of the resource
|
||||
Name *string `json:"name,omitempty"`
|
||||
// Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
|
||||
Type *string `json:"type,omitempty"`
|
||||
}
|
||||
|
||||
// CaaRecord a CAA record.
|
||||
type CaaRecord struct {
|
||||
// Flags - The flags for this CAA record as an integer between 0 and 255.
|
||||
@@ -116,6 +133,17 @@ type NsRecord struct {
|
||||
Nsdname *string `json:"nsdname,omitempty"`
|
||||
}
|
||||
|
||||
// ProxyResource the resource model definition for an ARM proxy resource. It will have everything other than
|
||||
// required location and tags
|
||||
type ProxyResource struct {
|
||||
// ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
|
||||
ID *string `json:"id,omitempty"`
|
||||
// Name - READ-ONLY; The name of the resource
|
||||
Name *string `json:"name,omitempty"`
|
||||
// Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
|
||||
Type *string `json:"type,omitempty"`
|
||||
}
|
||||
|
||||
// PtrRecord a PTR record.
|
||||
type PtrRecord struct {
|
||||
// Ptrdname - The PTR target domain name for this PTR record.
|
||||
@@ -125,11 +153,11 @@ type PtrRecord struct {
|
||||
// RecordSet describes a DNS record set (a collection of DNS records with the same name and type).
|
||||
type RecordSet struct {
|
||||
autorest.Response `json:"-"`
|
||||
// ID - The ID of the record set.
|
||||
// ID - READ-ONLY; The ID of the record set.
|
||||
ID *string `json:"id,omitempty"`
|
||||
// Name - The name of the record set.
|
||||
// Name - READ-ONLY; The name of the record set.
|
||||
Name *string `json:"name,omitempty"`
|
||||
// Type - The type of the record set.
|
||||
// Type - READ-ONLY; The type of the record set.
|
||||
Type *string `json:"type,omitempty"`
|
||||
// Etag - The etag of the record set.
|
||||
Etag *string `json:"etag,omitempty"`
|
||||
@@ -140,15 +168,6 @@ type RecordSet struct {
|
||||
// MarshalJSON is the custom marshaler for RecordSet.
|
||||
func (rs RecordSet) MarshalJSON() ([]byte, error) {
|
||||
objectMap := make(map[string]interface{})
|
||||
if rs.ID != nil {
|
||||
objectMap["id"] = rs.ID
|
||||
}
|
||||
if rs.Name != nil {
|
||||
objectMap["name"] = rs.Name
|
||||
}
|
||||
if rs.Type != nil {
|
||||
objectMap["type"] = rs.Type
|
||||
}
|
||||
if rs.Etag != nil {
|
||||
objectMap["etag"] = rs.Etag
|
||||
}
|
||||
@@ -223,7 +242,7 @@ type RecordSetListResult struct {
|
||||
autorest.Response `json:"-"`
|
||||
// Value - Information about the record sets in the response.
|
||||
Value *[]RecordSet `json:"value,omitempty"`
|
||||
// NextLink - The continuation token for the next page of results.
|
||||
// NextLink - READ-ONLY; The continuation token for the next page of results.
|
||||
NextLink *string `json:"nextLink,omitempty"`
|
||||
}
|
||||
|
||||
@@ -233,14 +252,24 @@ type RecordSetListResultIterator struct {
|
||||
page RecordSetListResultPage
|
||||
}
|
||||
|
||||
// Next advances to the next value. If there was an error making
|
||||
// NextWithContext advances to the next value. If there was an error making
|
||||
// the request the iterator does not advance and the error is returned.
|
||||
func (iter *RecordSetListResultIterator) Next() error {
|
||||
func (iter *RecordSetListResultIterator) NextWithContext(ctx context.Context) (err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/RecordSetListResultIterator.NextWithContext")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if iter.Response().Response.Response != nil {
|
||||
sc = iter.Response().Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
iter.i++
|
||||
if iter.i < len(iter.page.Values()) {
|
||||
return nil
|
||||
}
|
||||
err := iter.page.Next()
|
||||
err = iter.page.NextWithContext(ctx)
|
||||
if err != nil {
|
||||
iter.i--
|
||||
return err
|
||||
@@ -249,6 +278,13 @@ func (iter *RecordSetListResultIterator) Next() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Next advances to the next value. If there was an error making
|
||||
// the request the iterator does not advance and the error is returned.
|
||||
// Deprecated: Use NextWithContext() instead.
|
||||
func (iter *RecordSetListResultIterator) Next() error {
|
||||
return iter.NextWithContext(context.Background())
|
||||
}
|
||||
|
||||
// NotDone returns true if the enumeration should be started or is not yet complete.
|
||||
func (iter RecordSetListResultIterator) NotDone() bool {
|
||||
return iter.page.NotDone() && iter.i < len(iter.page.Values())
|
||||
@@ -268,6 +304,11 @@ func (iter RecordSetListResultIterator) Value() RecordSet {
|
||||
return iter.page.Values()[iter.i]
|
||||
}
|
||||
|
||||
// Creates a new instance of the RecordSetListResultIterator type.
|
||||
func NewRecordSetListResultIterator(page RecordSetListResultPage) RecordSetListResultIterator {
|
||||
return RecordSetListResultIterator{page: page}
|
||||
}
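// Illustrative sketch (not part of the vendored file): walking a record-set
// listing with the context-aware iterator introduced above. The client call
// used to obtain the iterator is an assumption for illustration.
//
//	iter, err := recordSetsClient.ListByDNSZoneComplete(ctx, "my-rg", "example.com", nil, "")
//	for err == nil && iter.NotDone() {
//		rs := iter.Value()
//		fmt.Println(*rs.Name)
//		err = iter.NextWithContext(ctx)
//	}
//	if err != nil {
//		// handle the error from the initial call or the last page fetch
//	}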
|
||||
|
||||
// IsEmpty returns true if the ListResult contains no values.
|
||||
func (rslr RecordSetListResult) IsEmpty() bool {
|
||||
return rslr.Value == nil || len(*rslr.Value) == 0
|
||||
@@ -275,11 +316,11 @@ func (rslr RecordSetListResult) IsEmpty() bool {
|
||||
|
||||
// recordSetListResultPreparer prepares a request to retrieve the next set of results.
|
||||
// It returns nil if no more results exist.
|
||||
func (rslr RecordSetListResult) recordSetListResultPreparer() (*http.Request, error) {
|
||||
func (rslr RecordSetListResult) recordSetListResultPreparer(ctx context.Context) (*http.Request, error) {
|
||||
if rslr.NextLink == nil || len(to.String(rslr.NextLink)) < 1 {
|
||||
return nil, nil
|
||||
}
|
||||
return autorest.Prepare(&http.Request{},
|
||||
return autorest.Prepare((&http.Request{}).WithContext(ctx),
|
||||
autorest.AsJSON(),
|
||||
autorest.AsGet(),
|
||||
autorest.WithBaseURL(to.String(rslr.NextLink)))
|
||||
@@ -287,14 +328,24 @@ func (rslr RecordSetListResult) recordSetListResultPreparer() (*http.Request, er
|
||||
|
||||
// RecordSetListResultPage contains a page of RecordSet values.
|
||||
type RecordSetListResultPage struct {
|
||||
fn func(RecordSetListResult) (RecordSetListResult, error)
|
||||
fn func(context.Context, RecordSetListResult) (RecordSetListResult, error)
|
||||
rslr RecordSetListResult
|
||||
}
|
||||
|
||||
// Next advances to the next page of values. If there was an error making
|
||||
// NextWithContext advances to the next page of values. If there was an error making
|
||||
// the request the page does not advance and the error is returned.
|
||||
func (page *RecordSetListResultPage) Next() error {
|
||||
next, err := page.fn(page.rslr)
|
||||
func (page *RecordSetListResultPage) NextWithContext(ctx context.Context) (err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/RecordSetListResultPage.NextWithContext")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if page.Response().Response.Response != nil {
|
||||
sc = page.Response().Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
next, err := page.fn(ctx, page.rslr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -302,6 +353,13 @@ func (page *RecordSetListResultPage) Next() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Next advances to the next page of values. If there was an error making
|
||||
// the request the page does not advance and the error is returned.
|
||||
// Deprecated: Use NextWithContext() instead.
|
||||
func (page *RecordSetListResultPage) Next() error {
|
||||
return page.NextWithContext(context.Background())
|
||||
}
|
||||
|
||||
// NotDone returns true if the page enumeration should be started or is not yet complete.
|
||||
func (page RecordSetListResultPage) NotDone() bool {
|
||||
return !page.rslr.IsEmpty()
|
||||
@@ -320,13 +378,18 @@ func (page RecordSetListResultPage) Values() []RecordSet {
|
||||
return *page.rslr.Value
|
||||
}
|
||||
|
||||
// Creates a new instance of the RecordSetListResultPage type.
|
||||
func NewRecordSetListResultPage(getNextPage func(context.Context, RecordSetListResult) (RecordSetListResult, error)) RecordSetListResultPage {
|
||||
return RecordSetListResultPage{fn: getNextPage}
|
||||
}
|
||||
|
||||
// RecordSetProperties represents the properties of the records in the record set.
|
||||
type RecordSetProperties struct {
|
||||
// Metadata - The metadata attached to the record set.
|
||||
Metadata map[string]*string `json:"metadata"`
|
||||
// TTL - The TTL (time-to-live) of the records in the record set.
|
||||
TTL *int64 `json:"TTL,omitempty"`
|
||||
// Fqdn - Fully qualified domain name of the record set.
|
||||
// Fqdn - READ-ONLY; Fully qualified domain name of the record set.
|
||||
Fqdn *string `json:"fqdn,omitempty"`
|
||||
// ARecords - The list of A records in the record set.
|
||||
ARecords *[]ARecord `json:"ARecords,omitempty"`
|
||||
@@ -359,9 +422,6 @@ func (rsp RecordSetProperties) MarshalJSON() ([]byte, error) {
|
||||
if rsp.TTL != nil {
|
||||
objectMap["TTL"] = rsp.TTL
|
||||
}
|
||||
if rsp.Fqdn != nil {
|
||||
objectMap["fqdn"] = rsp.Fqdn
|
||||
}
|
||||
if rsp.ARecords != nil {
|
||||
objectMap["ARecords"] = rsp.ARecords
|
||||
}
|
||||
@@ -401,39 +461,14 @@ type RecordSetUpdateParameters struct {
|
||||
RecordSet *RecordSet `json:"RecordSet,omitempty"`
|
||||
}
|
||||
|
||||
// Resource common properties of an Azure Resource Manager resource
|
||||
// Resource ...
|
||||
type Resource struct {
|
||||
// ID - Resource ID.
|
||||
// ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
|
||||
ID *string `json:"id,omitempty"`
|
||||
// Name - Resource name.
|
||||
// Name - READ-ONLY; The name of the resource
|
||||
Name *string `json:"name,omitempty"`
|
||||
// Type - Resource type.
|
||||
// Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
|
||||
Type *string `json:"type,omitempty"`
|
||||
// Location - Resource location.
|
||||
Location *string `json:"location,omitempty"`
|
||||
// Tags - Resource tags.
|
||||
Tags map[string]*string `json:"tags"`
|
||||
}
|
||||
|
||||
// MarshalJSON is the custom marshaler for Resource.
|
||||
func (r Resource) MarshalJSON() ([]byte, error) {
|
||||
objectMap := make(map[string]interface{})
|
||||
if r.ID != nil {
|
||||
objectMap["id"] = r.ID
|
||||
}
|
||||
if r.Name != nil {
|
||||
objectMap["name"] = r.Name
|
||||
}
|
||||
if r.Type != nil {
|
||||
objectMap["type"] = r.Type
|
||||
}
|
||||
if r.Location != nil {
|
||||
objectMap["location"] = r.Location
|
||||
}
|
||||
if r.Tags != nil {
|
||||
objectMap["tags"] = r.Tags
|
||||
}
|
||||
return json.Marshal(objectMap)
|
||||
}
|
||||
|
||||
// SoaRecord an SOA record.
|
||||
@@ -472,6 +507,32 @@ type SubResource struct {
|
||||
ID *string `json:"id,omitempty"`
|
||||
}
|
||||
|
||||
// TrackedResource the resource model definition for an ARM tracked top-level resource
|
||||
type TrackedResource struct {
|
||||
// Tags - Resource tags.
|
||||
Tags map[string]*string `json:"tags"`
|
||||
// Location - The geo-location where the resource lives
|
||||
Location *string `json:"location,omitempty"`
|
||||
// ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
|
||||
ID *string `json:"id,omitempty"`
|
||||
// Name - READ-ONLY; The name of the resource
|
||||
Name *string `json:"name,omitempty"`
|
||||
// Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
|
||||
Type *string `json:"type,omitempty"`
|
||||
}
|
||||
|
||||
// MarshalJSON is the custom marshaler for TrackedResource.
|
||||
func (tr TrackedResource) MarshalJSON() ([]byte, error) {
|
||||
objectMap := make(map[string]interface{})
|
||||
if tr.Tags != nil {
|
||||
objectMap["tags"] = tr.Tags
|
||||
}
|
||||
if tr.Location != nil {
|
||||
objectMap["location"] = tr.Location
|
||||
}
|
||||
return json.Marshal(objectMap)
|
||||
}
|
||||
|
||||
// TxtRecord a TXT record.
|
||||
type TxtRecord struct {
|
||||
// Value - The text value of this TXT record.
|
||||
@@ -485,16 +546,16 @@ type Zone struct {
|
||||
Etag *string `json:"etag,omitempty"`
|
||||
// ZoneProperties - The properties of the zone.
|
||||
*ZoneProperties `json:"properties,omitempty"`
|
||||
// ID - Resource ID.
|
||||
ID *string `json:"id,omitempty"`
|
||||
// Name - Resource name.
|
||||
Name *string `json:"name,omitempty"`
|
||||
// Type - Resource type.
|
||||
Type *string `json:"type,omitempty"`
|
||||
// Location - Resource location.
|
||||
Location *string `json:"location,omitempty"`
|
||||
// Tags - Resource tags.
|
||||
Tags map[string]*string `json:"tags"`
|
||||
// Location - The geo-location where the resource lives
|
||||
Location *string `json:"location,omitempty"`
|
||||
// ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
|
||||
ID *string `json:"id,omitempty"`
|
||||
// Name - READ-ONLY; The name of the resource
|
||||
Name *string `json:"name,omitempty"`
|
||||
// Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
|
||||
Type *string `json:"type,omitempty"`
|
||||
}
|
||||
|
||||
// MarshalJSON is the custom marshaler for Zone.
|
||||
@@ -506,21 +567,12 @@ func (z Zone) MarshalJSON() ([]byte, error) {
|
||||
if z.ZoneProperties != nil {
|
||||
objectMap["properties"] = z.ZoneProperties
|
||||
}
|
||||
if z.ID != nil {
|
||||
objectMap["id"] = z.ID
|
||||
}
|
||||
if z.Name != nil {
|
||||
objectMap["name"] = z.Name
|
||||
}
|
||||
if z.Type != nil {
|
||||
objectMap["type"] = z.Type
|
||||
if z.Tags != nil {
|
||||
objectMap["tags"] = z.Tags
|
||||
}
|
||||
if z.Location != nil {
|
||||
objectMap["location"] = z.Location
|
||||
}
|
||||
if z.Tags != nil {
|
||||
objectMap["tags"] = z.Tags
|
||||
}
|
||||
return json.Marshal(objectMap)
|
||||
}
|
||||
|
||||
@@ -551,6 +603,24 @@ func (z *Zone) UnmarshalJSON(body []byte) error {
|
||||
}
|
||||
z.ZoneProperties = &zoneProperties
|
||||
}
|
||||
case "tags":
|
||||
if v != nil {
|
||||
var tags map[string]*string
|
||||
err = json.Unmarshal(*v, &tags)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
z.Tags = tags
|
||||
}
|
||||
case "location":
|
||||
if v != nil {
|
||||
var location string
|
||||
err = json.Unmarshal(*v, &location)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
z.Location = &location
|
||||
}
|
||||
case "id":
|
||||
if v != nil {
|
||||
var ID string
|
||||
@@ -578,24 +648,6 @@ func (z *Zone) UnmarshalJSON(body []byte) error {
|
||||
}
|
||||
z.Type = &typeVar
|
||||
}
|
||||
case "location":
|
||||
if v != nil {
|
||||
var location string
|
||||
err = json.Unmarshal(*v, &location)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
z.Location = &location
|
||||
}
|
||||
case "tags":
|
||||
if v != nil {
|
||||
var tags map[string]*string
|
||||
err = json.Unmarshal(*v, &tags)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
z.Tags = tags
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -607,7 +659,7 @@ type ZoneListResult struct {
|
||||
autorest.Response `json:"-"`
|
||||
// Value - Information about the DNS zones.
|
||||
Value *[]Zone `json:"value,omitempty"`
|
||||
// NextLink - The continuation token for the next page of results.
|
||||
// NextLink - READ-ONLY; The continuation token for the next page of results.
|
||||
NextLink *string `json:"nextLink,omitempty"`
|
||||
}
|
||||
|
||||
@@ -617,14 +669,24 @@ type ZoneListResultIterator struct {
|
||||
page ZoneListResultPage
|
||||
}
|
||||
|
||||
// Next advances to the next value. If there was an error making
|
||||
// NextWithContext advances to the next value. If there was an error making
|
||||
// the request the iterator does not advance and the error is returned.
|
||||
func (iter *ZoneListResultIterator) Next() error {
|
||||
func (iter *ZoneListResultIterator) NextWithContext(ctx context.Context) (err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/ZoneListResultIterator.NextWithContext")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if iter.Response().Response.Response != nil {
|
||||
sc = iter.Response().Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
iter.i++
|
||||
if iter.i < len(iter.page.Values()) {
|
||||
return nil
|
||||
}
|
||||
err := iter.page.Next()
|
||||
err = iter.page.NextWithContext(ctx)
|
||||
if err != nil {
|
||||
iter.i--
|
||||
return err
|
||||
@@ -633,6 +695,13 @@ func (iter *ZoneListResultIterator) Next() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Next advances to the next value. If there was an error making
|
||||
// the request the iterator does not advance and the error is returned.
|
||||
// Deprecated: Use NextWithContext() instead.
|
||||
func (iter *ZoneListResultIterator) Next() error {
|
||||
return iter.NextWithContext(context.Background())
|
||||
}
|
||||
|
||||
// NotDone returns true if the enumeration should be started or is not yet complete.
|
||||
func (iter ZoneListResultIterator) NotDone() bool {
|
||||
return iter.page.NotDone() && iter.i < len(iter.page.Values())
|
||||
@@ -652,6 +721,11 @@ func (iter ZoneListResultIterator) Value() Zone {
|
||||
return iter.page.Values()[iter.i]
|
||||
}
|
||||
|
||||
// Creates a new instance of the ZoneListResultIterator type.
|
||||
func NewZoneListResultIterator(page ZoneListResultPage) ZoneListResultIterator {
|
||||
return ZoneListResultIterator{page: page}
|
||||
}
|
||||
|
||||
// IsEmpty returns true if the ListResult contains no values.
|
||||
func (zlr ZoneListResult) IsEmpty() bool {
|
||||
return zlr.Value == nil || len(*zlr.Value) == 0
|
||||
@@ -659,11 +733,11 @@ func (zlr ZoneListResult) IsEmpty() bool {
|
||||
|
||||
// zoneListResultPreparer prepares a request to retrieve the next set of results.
|
||||
// It returns nil if no more results exist.
|
||||
func (zlr ZoneListResult) zoneListResultPreparer() (*http.Request, error) {
|
||||
func (zlr ZoneListResult) zoneListResultPreparer(ctx context.Context) (*http.Request, error) {
|
||||
if zlr.NextLink == nil || len(to.String(zlr.NextLink)) < 1 {
|
||||
return nil, nil
|
||||
}
|
||||
return autorest.Prepare(&http.Request{},
|
||||
return autorest.Prepare((&http.Request{}).WithContext(ctx),
|
||||
autorest.AsJSON(),
|
||||
autorest.AsGet(),
|
||||
autorest.WithBaseURL(to.String(zlr.NextLink)))
|
||||
@@ -671,14 +745,24 @@ func (zlr ZoneListResult) zoneListResultPreparer() (*http.Request, error) {
|
||||
|
||||
// ZoneListResultPage contains a page of Zone values.
|
||||
type ZoneListResultPage struct {
|
||||
fn func(ZoneListResult) (ZoneListResult, error)
|
||||
fn func(context.Context, ZoneListResult) (ZoneListResult, error)
|
||||
zlr ZoneListResult
|
||||
}
|
||||
|
||||
// Next advances to the next page of values. If there was an error making
|
||||
// NextWithContext advances to the next page of values. If there was an error making
|
||||
// the request the page does not advance and the error is returned.
|
||||
func (page *ZoneListResultPage) Next() error {
|
||||
next, err := page.fn(page.zlr)
|
||||
func (page *ZoneListResultPage) NextWithContext(ctx context.Context) (err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/ZoneListResultPage.NextWithContext")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if page.Response().Response.Response != nil {
|
||||
sc = page.Response().Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
next, err := page.fn(ctx, page.zlr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -686,6 +770,13 @@ func (page *ZoneListResultPage) Next() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Next advances to the next page of values. If there was an error making
|
||||
// the request the page does not advance and the error is returned.
|
||||
// Deprecated: Use NextWithContext() instead.
|
||||
func (page *ZoneListResultPage) Next() error {
|
||||
return page.NextWithContext(context.Background())
|
||||
}
|
||||
|
||||
// NotDone returns true if the page enumeration should be started or is not yet complete.
|
||||
func (page ZoneListResultPage) NotDone() bool {
|
||||
return !page.zlr.IsEmpty()
|
||||
@@ -704,60 +795,39 @@ func (page ZoneListResultPage) Values() []Zone {
|
||||
return *page.zlr.Value
|
||||
}
|
||||
|
||||
// Creates a new instance of the ZoneListResultPage type.
|
||||
func NewZoneListResultPage(getNextPage func(context.Context, ZoneListResult) (ZoneListResult, error)) ZoneListResultPage {
|
||||
return ZoneListResultPage{fn: getNextPage}
|
||||
}
|
||||
|
||||
// ZoneProperties represents the properties of the zone.
|
||||
type ZoneProperties struct {
|
||||
// MaxNumberOfRecordSets - The maximum number of record sets that can be created in this DNS zone. This is a read-only property and any attempt to set this value will be ignored.
|
||||
// MaxNumberOfRecordSets - READ-ONLY; The maximum number of record sets that can be created in this DNS zone. This is a read-only property and any attempt to set this value will be ignored.
|
||||
MaxNumberOfRecordSets *int64 `json:"maxNumberOfRecordSets,omitempty"`
|
||||
// NumberOfRecordSets - The current number of record sets in this DNS zone. This is a read-only property and any attempt to set this value will be ignored.
|
||||
// NumberOfRecordSets - READ-ONLY; The current number of record sets in this DNS zone. This is a read-only property and any attempt to set this value will be ignored.
|
||||
NumberOfRecordSets *int64 `json:"numberOfRecordSets,omitempty"`
|
||||
// NameServers - The name servers for this DNS zone. This is a read-only property and any attempt to set this value will be ignored.
|
||||
// NameServers - READ-ONLY; The name servers for this DNS zone. This is a read-only property and any attempt to set this value will be ignored.
|
||||
NameServers *[]string `json:"nameServers,omitempty"`
|
||||
}
|
||||
|
||||
// ZonesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running operation.
|
||||
type ZonesDeleteFuture struct {
|
||||
azure.Future
|
||||
req *http.Request
|
||||
}
|
||||
|
||||
// Result returns the result of the asynchronous operation.
|
||||
// If the operation has not completed it will return an error.
|
||||
func (future ZonesDeleteFuture) Result(client ZonesClient) (ar autorest.Response, err error) {
|
||||
func (future *ZonesDeleteFuture) Result(client ZonesClient) (ar autorest.Response, err error) {
|
||||
var done bool
|
||||
done, err = future.Done(client)
|
||||
done, err = future.DoneWithContext(context.Background(), client)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "dns.ZonesDeleteFuture", "Result", future.Response(), "Polling failure")
|
||||
return
|
||||
}
|
||||
if !done {
|
||||
return ar, azure.NewAsyncOpIncompleteError("dns.ZonesDeleteFuture")
|
||||
}
|
||||
if future.PollingMethod() == azure.PollingLocation {
|
||||
ar, err = client.DeleteResponder(future.Response())
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "dns.ZonesDeleteFuture", "Result", future.Response(), "Failure responding to request")
|
||||
}
|
||||
err = azure.NewAsyncOpIncompleteError("dns.ZonesDeleteFuture")
|
||||
return
|
||||
}
|
||||
var req *http.Request
|
||||
var resp *http.Response
|
||||
if future.PollingURL() != "" {
|
||||
req, err = http.NewRequest(http.MethodGet, future.PollingURL(), nil)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
req = autorest.ChangeToGet(future.req)
|
||||
}
|
||||
resp, err = autorest.SendWithSender(client, req,
|
||||
autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "dns.ZonesDeleteFuture", "Result", resp, "Failure sending request")
|
||||
return
|
||||
}
|
||||
ar, err = client.DeleteResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "dns.ZonesDeleteFuture", "Result", resp, "Failure responding to request")
|
||||
}
|
||||
ar.Response = future.Response()
|
||||
return
|
||||
}
|
||||
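As a usage note, a hedged sketch of driving the future returned by ZonesClient.Delete; it assumes the vendored azure.Future exposes WaitForCompletionRef (not shown in this diff), and the helper name and arguments are illustrative only.

func deleteZone(ctx context.Context, zonesClient dns.ZonesClient, resourceGroup, zoneName string) error {
	// Delete starts the long-running operation and returns a ZonesDeleteFuture.
	future, err := zonesClient.Delete(ctx, resourceGroup, zoneName, "") // empty ifMatch: unconditional delete
	if err != nil {
		return err
	}
	// Assumed azure.Future helper: poll until the operation completes,
	// reusing the client's retry configuration.
	if err := future.WaitForCompletionRef(ctx, zonesClient.Client); err != nil {
		return err
	}
	// Result surfaces the final autorest.Response once the future reports done.
	_, err = future.Result(zonesClient)
	return err
}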
235 vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2017-09-01/dns/recordsets.go (generated, vendored)
@@ -21,6 +21,8 @@ import (
	"context"
	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/azure"
	"github.com/Azure/go-autorest/autorest/validation"
	"github.com/Azure/go-autorest/tracing"
	"net/http"
)

@@ -40,15 +42,38 @@ func NewRecordSetsClientWithBaseURI(baseURI string, subscriptionID string) Recor
|
||||
}
|
||||
|
||||
// CreateOrUpdate creates or updates a record set within a DNS zone.
|
||||
//
|
||||
// resourceGroupName is the name of the resource group. zoneName is the name of the DNS zone (without a terminating
|
||||
// dot). relativeRecordSetName is the name of the record set, relative to the name of the zone. recordType is the
|
||||
// type of DNS record in this record set. Record sets of type SOA can be updated but not created (they are created
|
||||
// when the DNS zone is created). parameters is parameters supplied to the CreateOrUpdate operation. ifMatch is the
|
||||
// etag of the record set. Omit this value to always overwrite the current record set. Specify the last-seen etag
|
||||
// value to prevent accidentally overwritting any concurrent changes. ifNoneMatch is set to '*' to allow a new
|
||||
// record set to be created, but to prevent updating an existing record set. Other values will be ignored.
|
||||
// Parameters:
|
||||
// resourceGroupName - the name of the resource group. The name is case insensitive.
|
||||
// zoneName - the name of the DNS zone (without a terminating dot).
|
||||
// relativeRecordSetName - the name of the record set, relative to the name of the zone.
|
||||
// recordType - the type of DNS record in this record set. Record sets of type SOA can be updated but not
|
||||
// created (they are created when the DNS zone is created).
|
||||
// parameters - parameters supplied to the CreateOrUpdate operation.
|
||||
// ifMatch - the etag of the record set. Omit this value to always overwrite the current record set. Specify
|
||||
// the last-seen etag value to prevent accidentally overwriting any concurrent changes.
|
||||
// ifNoneMatch - set to '*' to allow a new record set to be created, but to prevent updating an existing record
|
||||
// set. Other values will be ignored.
|
||||
func (client RecordSetsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, zoneName string, relativeRecordSetName string, recordType RecordType, parameters RecordSet, ifMatch string, ifNoneMatch string) (result RecordSet, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/RecordSetsClient.CreateOrUpdate")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.Response.Response != nil {
|
||||
sc = result.Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
if err := validation.Validate([]validation.Validation{
|
||||
{TargetValue: resourceGroupName,
|
||||
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
|
||||
{TargetValue: client.SubscriptionID,
|
||||
Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
|
||||
return result, validation.NewError("dns.RecordSetsClient", "CreateOrUpdate", err.Error())
|
||||
}
|
||||
|
||||
req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, zoneName, relativeRecordSetName, recordType, parameters, ifMatch, ifNoneMatch)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "CreateOrUpdate", nil, "Failure preparing request")
|
||||
@@ -85,6 +110,9 @@ func (client RecordSetsClient) CreateOrUpdatePreparer(ctx context.Context, resou
		"api-version": APIVersion,
	}

	parameters.ID = nil
	parameters.Name = nil
	parameters.Type = nil
	preparer := autorest.CreatePreparer(
		autorest.AsContentType("application/json; charset=utf-8"),
		autorest.AsPut(),
@@ -124,13 +152,35 @@ func (client RecordSetsClient) CreateOrUpdateResponder(resp *http.Response) (res
|
||||
}
|
||||
|
||||
// Delete deletes a record set from a DNS zone. This operation cannot be undone.
|
||||
//
|
||||
// resourceGroupName is the name of the resource group. zoneName is the name of the DNS zone (without a terminating
|
||||
// dot). relativeRecordSetName is the name of the record set, relative to the name of the zone. recordType is the
|
||||
// type of DNS record in this record set. Record sets of type SOA cannot be deleted (they are deleted when the DNS
|
||||
// zone is deleted). ifMatch is the etag of the record set. Omit this value to always delete the current record
|
||||
// set. Specify the last-seen etag value to prevent accidentally deleting any concurrent changes.
|
||||
// Parameters:
|
||||
// resourceGroupName - the name of the resource group. The name is case insensitive.
|
||||
// zoneName - the name of the DNS zone (without a terminating dot).
|
||||
// relativeRecordSetName - the name of the record set, relative to the name of the zone.
|
||||
// recordType - the type of DNS record in this record set. Record sets of type SOA cannot be deleted (they are
|
||||
// deleted when the DNS zone is deleted).
|
||||
// ifMatch - the etag of the record set. Omit this value to always delete the current record set. Specify the
|
||||
// last-seen etag value to prevent accidentally deleting any concurrent changes.
|
||||
func (client RecordSetsClient) Delete(ctx context.Context, resourceGroupName string, zoneName string, relativeRecordSetName string, recordType RecordType, ifMatch string) (result autorest.Response, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/RecordSetsClient.Delete")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.Response != nil {
|
||||
sc = result.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
if err := validation.Validate([]validation.Validation{
|
||||
{TargetValue: resourceGroupName,
|
||||
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
|
||||
{TargetValue: client.SubscriptionID,
|
||||
Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
|
||||
return result, validation.NewError("dns.RecordSetsClient", "Delete", err.Error())
|
||||
}
|
||||
|
||||
req, err := client.DeletePreparer(ctx, resourceGroupName, zoneName, relativeRecordSetName, recordType, ifMatch)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "Delete", nil, "Failure preparing request")
|
||||
@@ -199,11 +249,32 @@ func (client RecordSetsClient) DeleteResponder(resp *http.Response) (result auto
|
||||
}
|
||||
|
||||
// Get gets a record set.
|
||||
//
|
||||
// resourceGroupName is the name of the resource group. zoneName is the name of the DNS zone (without a terminating
|
||||
// dot). relativeRecordSetName is the name of the record set, relative to the name of the zone. recordType is the
|
||||
// type of DNS record in this record set.
|
||||
// Parameters:
|
||||
// resourceGroupName - the name of the resource group. The name is case insensitive.
|
||||
// zoneName - the name of the DNS zone (without a terminating dot).
|
||||
// relativeRecordSetName - the name of the record set, relative to the name of the zone.
|
||||
// recordType - the type of DNS record in this record set.
|
||||
func (client RecordSetsClient) Get(ctx context.Context, resourceGroupName string, zoneName string, relativeRecordSetName string, recordType RecordType) (result RecordSet, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/RecordSetsClient.Get")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.Response.Response != nil {
|
||||
sc = result.Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
if err := validation.Validate([]validation.Validation{
|
||||
{TargetValue: resourceGroupName,
|
||||
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
|
||||
{TargetValue: client.SubscriptionID,
|
||||
Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
|
||||
return result, validation.NewError("dns.RecordSetsClient", "Get", err.Error())
|
||||
}
|
||||
|
||||
req, err := client.GetPreparer(ctx, resourceGroupName, zoneName, relativeRecordSetName, recordType)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "Get", nil, "Failure preparing request")
|
||||
@@ -269,13 +340,34 @@ func (client RecordSetsClient) GetResponder(resp *http.Response) (result RecordS
|
||||
}
|
||||
|
||||
// ListByDNSZone lists all record sets in a DNS zone.
|
||||
//
|
||||
// resourceGroupName is the name of the resource group. zoneName is the name of the DNS zone (without a terminating
|
||||
// dot). top is the maximum number of record sets to return. If not specified, returns up to 100 record sets.
|
||||
// recordsetnamesuffix is the suffix label of the record set name that has to be used to filter the record set
|
||||
// Parameters:
|
||||
// resourceGroupName - the name of the resource group. The name is case insensitive.
|
||||
// zoneName - the name of the DNS zone (without a terminating dot).
|
||||
// top - the maximum number of record sets to return. If not specified, returns up to 100 record sets.
|
||||
// recordsetnamesuffix - the suffix label of the record set name that has to be used to filter the record set
|
||||
// enumerations. If this parameter is specified, Enumeration will return only records that end with
|
||||
// .<recordSetNameSuffix>
|
||||
func (client RecordSetsClient) ListByDNSZone(ctx context.Context, resourceGroupName string, zoneName string, top *int32, recordsetnamesuffix string) (result RecordSetListResultPage, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/RecordSetsClient.ListByDNSZone")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.rslr.Response.Response != nil {
|
||||
sc = result.rslr.Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
if err := validation.Validate([]validation.Validation{
|
||||
{TargetValue: resourceGroupName,
|
||||
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
|
||||
{TargetValue: client.SubscriptionID,
|
||||
Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
|
||||
return result, validation.NewError("dns.RecordSetsClient", "ListByDNSZone", err.Error())
|
||||
}
|
||||
|
||||
result.fn = client.listByDNSZoneNextResults
|
||||
req, err := client.ListByDNSZonePreparer(ctx, resourceGroupName, zoneName, top, recordsetnamesuffix)
|
||||
if err != nil {
|
||||
@@ -346,8 +438,8 @@ func (client RecordSetsClient) ListByDNSZoneResponder(resp *http.Response) (resu
|
||||
}
|
||||
|
||||
// listByDNSZoneNextResults retrieves the next set of results, if any.
|
||||
func (client RecordSetsClient) listByDNSZoneNextResults(lastResults RecordSetListResult) (result RecordSetListResult, err error) {
|
||||
req, err := lastResults.recordSetListResultPreparer()
|
||||
func (client RecordSetsClient) listByDNSZoneNextResults(ctx context.Context, lastResults RecordSetListResult) (result RecordSetListResult, err error) {
|
||||
req, err := lastResults.recordSetListResultPreparer(ctx)
|
||||
if err != nil {
|
||||
return result, autorest.NewErrorWithError(err, "dns.RecordSetsClient", "listByDNSZoneNextResults", nil, "Failure preparing next results request")
|
||||
}
|
||||
@@ -368,18 +460,50 @@ func (client RecordSetsClient) listByDNSZoneNextResults(lastResults RecordSetLis
|
||||
|
||||
// ListByDNSZoneComplete enumerates all values, automatically crossing page boundaries as required.
|
||||
func (client RecordSetsClient) ListByDNSZoneComplete(ctx context.Context, resourceGroupName string, zoneName string, top *int32, recordsetnamesuffix string) (result RecordSetListResultIterator, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/RecordSetsClient.ListByDNSZone")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.Response().Response.Response != nil {
|
||||
sc = result.page.Response().Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
result.page, err = client.ListByDNSZone(ctx, resourceGroupName, zoneName, top, recordsetnamesuffix)
|
||||
return
|
||||
}
|
||||
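A brief, assumed example of consuming the corresponding iterator: ListByDNSZoneComplete crosses page boundaries automatically, so callers only advance with NextWithContext; the helper name and the pre-built rsClient are illustrative.

func printRecordSets(ctx context.Context, rsClient dns.RecordSetsClient, resourceGroup, zoneName string) error {
	// nil "top" and an empty suffix filter enumerate every record set in the zone.
	iter, err := rsClient.ListByDNSZoneComplete(ctx, resourceGroup, zoneName, nil, "")
	if err != nil {
		return err
	}
	for iter.NotDone() {
		rs := iter.Value()
		fmt.Printf("%s (%s)\n", *rs.Name, *rs.Type)
		// The iterator fetches the next page transparently when the current one is exhausted.
		if err := iter.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}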
|
||||
// ListByType lists the record sets of a specified type in a DNS zone.
|
||||
//
|
||||
// resourceGroupName is the name of the resource group. zoneName is the name of the DNS zone (without a terminating
|
||||
// dot). recordType is the type of record sets to enumerate. top is the maximum number of record sets to return. If
|
||||
// not specified, returns up to 100 record sets. recordsetnamesuffix is the suffix label of the record set name
|
||||
// that has to be used to filter the record set enumerations. If this parameter is specified, Enumeration will
|
||||
// return only records that end with .<recordSetNameSuffix>
|
||||
// Parameters:
|
||||
// resourceGroupName - the name of the resource group. The name is case insensitive.
|
||||
// zoneName - the name of the DNS zone (without a terminating dot).
|
||||
// recordType - the type of record sets to enumerate.
|
||||
// top - the maximum number of record sets to return. If not specified, returns up to 100 record sets.
|
||||
// recordsetnamesuffix - the suffix label of the record set name that has to be used to filter the record set
|
||||
// enumerations. If this parameter is specified, Enumeration will return only records that end with
|
||||
// .<recordSetNameSuffix>
|
||||
func (client RecordSetsClient) ListByType(ctx context.Context, resourceGroupName string, zoneName string, recordType RecordType, top *int32, recordsetnamesuffix string) (result RecordSetListResultPage, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/RecordSetsClient.ListByType")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.rslr.Response.Response != nil {
|
||||
sc = result.rslr.Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
if err := validation.Validate([]validation.Validation{
|
||||
{TargetValue: resourceGroupName,
|
||||
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
|
||||
{TargetValue: client.SubscriptionID,
|
||||
Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
|
||||
return result, validation.NewError("dns.RecordSetsClient", "ListByType", err.Error())
|
||||
}
|
||||
|
||||
result.fn = client.listByTypeNextResults
|
||||
req, err := client.ListByTypePreparer(ctx, resourceGroupName, zoneName, recordType, top, recordsetnamesuffix)
|
||||
if err != nil {
|
||||
@@ -451,8 +575,8 @@ func (client RecordSetsClient) ListByTypeResponder(resp *http.Response) (result
|
||||
}
|
||||
|
||||
// listByTypeNextResults retrieves the next set of results, if any.
|
||||
func (client RecordSetsClient) listByTypeNextResults(lastResults RecordSetListResult) (result RecordSetListResult, err error) {
|
||||
req, err := lastResults.recordSetListResultPreparer()
|
||||
func (client RecordSetsClient) listByTypeNextResults(ctx context.Context, lastResults RecordSetListResult) (result RecordSetListResult, err error) {
|
||||
req, err := lastResults.recordSetListResultPreparer(ctx)
|
||||
if err != nil {
|
||||
return result, autorest.NewErrorWithError(err, "dns.RecordSetsClient", "listByTypeNextResults", nil, "Failure preparing next results request")
|
||||
}
|
||||
@@ -473,18 +597,50 @@ func (client RecordSetsClient) listByTypeNextResults(lastResults RecordSetListRe
|
||||
|
||||
// ListByTypeComplete enumerates all values, automatically crossing page boundaries as required.
|
||||
func (client RecordSetsClient) ListByTypeComplete(ctx context.Context, resourceGroupName string, zoneName string, recordType RecordType, top *int32, recordsetnamesuffix string) (result RecordSetListResultIterator, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/RecordSetsClient.ListByType")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.Response().Response.Response != nil {
|
||||
sc = result.page.Response().Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
result.page, err = client.ListByType(ctx, resourceGroupName, zoneName, recordType, top, recordsetnamesuffix)
|
||||
return
|
||||
}
|
||||
|
||||
// Update updates a record set within a DNS zone.
|
||||
//
|
||||
// resourceGroupName is the name of the resource group. zoneName is the name of the DNS zone (without a terminating
|
||||
// dot). relativeRecordSetName is the name of the record set, relative to the name of the zone. recordType is the
|
||||
// type of DNS record in this record set. parameters is parameters supplied to the Update operation. ifMatch is the
|
||||
// etag of the record set. Omit this value to always overwrite the current record set. Specify the last-seen etag
|
||||
// value to prevent accidentally overwritting concurrent changes.
|
||||
// Parameters:
|
||||
// resourceGroupName - the name of the resource group. The name is case insensitive.
|
||||
// zoneName - the name of the DNS zone (without a terminating dot).
|
||||
// relativeRecordSetName - the name of the record set, relative to the name of the zone.
|
||||
// recordType - the type of DNS record in this record set.
|
||||
// parameters - parameters supplied to the Update operation.
|
||||
// ifMatch - the etag of the record set. Omit this value to always overwrite the current record set. Specify
|
||||
// the last-seen etag value to prevent accidentally overwriting concurrent changes.
|
||||
func (client RecordSetsClient) Update(ctx context.Context, resourceGroupName string, zoneName string, relativeRecordSetName string, recordType RecordType, parameters RecordSet, ifMatch string) (result RecordSet, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/RecordSetsClient.Update")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.Response.Response != nil {
|
||||
sc = result.Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
if err := validation.Validate([]validation.Validation{
|
||||
{TargetValue: resourceGroupName,
|
||||
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
|
||||
{TargetValue: client.SubscriptionID,
|
||||
Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
|
||||
return result, validation.NewError("dns.RecordSetsClient", "Update", err.Error())
|
||||
}
|
||||
|
||||
req, err := client.UpdatePreparer(ctx, resourceGroupName, zoneName, relativeRecordSetName, recordType, parameters, ifMatch)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "Update", nil, "Failure preparing request")
|
||||
@@ -521,6 +677,9 @@ func (client RecordSetsClient) UpdatePreparer(ctx context.Context, resourceGroup
		"api-version": APIVersion,
	}

	parameters.ID = nil
	parameters.Name = nil
	parameters.Type = nil
	preparer := autorest.CreatePreparer(
		autorest.AsContentType("application/json; charset=utf-8"),
		autorest.AsPatch(),
175 vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2017-09-01/dns/zones.go (generated, vendored)
@@ -21,6 +21,8 @@ import (
	"context"
	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/azure"
	"github.com/Azure/go-autorest/autorest/validation"
	"github.com/Azure/go-autorest/tracing"
	"net/http"
)

@@ -40,13 +42,35 @@ func NewZonesClientWithBaseURI(baseURI string, subscriptionID string) ZonesClien
|
||||
}
|
||||
|
||||
// CreateOrUpdate creates or updates a DNS zone. Does not modify DNS records within the zone.
|
||||
//
|
||||
// resourceGroupName is the name of the resource group. zoneName is the name of the DNS zone (without a terminating
|
||||
// dot). parameters is parameters supplied to the CreateOrUpdate operation. ifMatch is the etag of the DNS zone.
|
||||
// Omit this value to always overwrite the current zone. Specify the last-seen etag value to prevent accidentally
|
||||
// overwritting any concurrent changes. ifNoneMatch is set to '*' to allow a new DNS zone to be created, but to
|
||||
// prevent updating an existing zone. Other values will be ignored.
|
||||
// Parameters:
|
||||
// resourceGroupName - the name of the resource group. The name is case insensitive.
|
||||
// zoneName - the name of the DNS zone (without a terminating dot).
|
||||
// parameters - parameters supplied to the CreateOrUpdate operation.
|
||||
// ifMatch - the etag of the DNS zone. Omit this value to always overwrite the current zone. Specify the
|
||||
// last-seen etag value to prevent accidentally overwriting any concurrent changes.
|
||||
// ifNoneMatch - set to '*' to allow a new DNS zone to be created, but to prevent updating an existing zone.
|
||||
// Other values will be ignored.
|
||||
func (client ZonesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, zoneName string, parameters Zone, ifMatch string, ifNoneMatch string) (result Zone, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/ZonesClient.CreateOrUpdate")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.Response.Response != nil {
|
||||
sc = result.Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
if err := validation.Validate([]validation.Validation{
|
||||
{TargetValue: resourceGroupName,
|
||||
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
|
||||
{TargetValue: client.SubscriptionID,
|
||||
Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
|
||||
return result, validation.NewError("dns.ZonesClient", "CreateOrUpdate", err.Error())
|
||||
}
|
||||
|
||||
req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, zoneName, parameters, ifMatch, ifNoneMatch)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "dns.ZonesClient", "CreateOrUpdate", nil, "Failure preparing request")
|
||||
@@ -121,11 +145,32 @@ func (client ZonesClient) CreateOrUpdateResponder(resp *http.Response) (result Z
|
||||
|
||||
// Delete deletes a DNS zone. WARNING: All DNS records in the zone will also be deleted. This operation cannot be
|
||||
// undone.
|
||||
//
|
||||
// resourceGroupName is the name of the resource group. zoneName is the name of the DNS zone (without a terminating
|
||||
// dot). ifMatch is the etag of the DNS zone. Omit this value to always delete the current zone. Specify the
|
||||
// last-seen etag value to prevent accidentally deleting any concurrent changes.
|
||||
// Parameters:
|
||||
// resourceGroupName - the name of the resource group. The name is case insensitive.
|
||||
// zoneName - the name of the DNS zone (without a terminating dot).
|
||||
// ifMatch - the etag of the DNS zone. Omit this value to always delete the current zone. Specify the last-seen
|
||||
// etag value to prevent accidentally deleting any concurrent changes.
|
||||
func (client ZonesClient) Delete(ctx context.Context, resourceGroupName string, zoneName string, ifMatch string) (result ZonesDeleteFuture, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/ZonesClient.Delete")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.Response() != nil {
|
||||
sc = result.Response().StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
if err := validation.Validate([]validation.Validation{
|
||||
{TargetValue: resourceGroupName,
|
||||
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
|
||||
{TargetValue: client.SubscriptionID,
|
||||
Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
|
||||
return result, validation.NewError("dns.ZonesClient", "Delete", err.Error())
|
||||
}
|
||||
|
||||
req, err := client.DeletePreparer(ctx, resourceGroupName, zoneName, ifMatch)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "dns.ZonesClient", "Delete", nil, "Failure preparing request")
|
||||
@@ -169,15 +214,13 @@ func (client ZonesClient) DeletePreparer(ctx context.Context, resourceGroupName
|
||||
// DeleteSender sends the Delete request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client ZonesClient) DeleteSender(req *http.Request) (future ZonesDeleteFuture, err error) {
|
||||
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
|
||||
future.Future = azure.NewFuture(req)
|
||||
future.req = req
|
||||
_, err = future.Done(sender)
|
||||
var resp *http.Response
|
||||
resp, err = autorest.SendWithSender(client, req,
|
||||
azure.DoRetryWithRegistration(client.Client))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = autorest.Respond(future.Response(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent))
|
||||
future.Future, err = azure.NewFutureFromResponse(resp)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -194,10 +237,30 @@ func (client ZonesClient) DeleteResponder(resp *http.Response) (result autorest.
|
||||
}
|
||||
|
||||
// Get gets a DNS zone. Retrieves the zone properties, but not the record sets within the zone.
|
||||
//
|
||||
// resourceGroupName is the name of the resource group. zoneName is the name of the DNS zone (without a terminating
|
||||
// dot).
|
||||
// Parameters:
|
||||
// resourceGroupName - the name of the resource group. The name is case insensitive.
|
||||
// zoneName - the name of the DNS zone (without a terminating dot).
|
||||
func (client ZonesClient) Get(ctx context.Context, resourceGroupName string, zoneName string) (result Zone, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/ZonesClient.Get")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.Response.Response != nil {
|
||||
sc = result.Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
if err := validation.Validate([]validation.Validation{
|
||||
{TargetValue: resourceGroupName,
|
||||
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
|
||||
{TargetValue: client.SubscriptionID,
|
||||
Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
|
||||
return result, validation.NewError("dns.ZonesClient", "Get", err.Error())
|
||||
}
|
||||
|
||||
req, err := client.GetPreparer(ctx, resourceGroupName, zoneName)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "dns.ZonesClient", "Get", nil, "Failure preparing request")
|
||||
@@ -261,9 +324,25 @@ func (client ZonesClient) GetResponder(resp *http.Response) (result Zone, err er
|
||||
}
|
||||
|
||||
// List lists the DNS zones in all resource groups in a subscription.
|
||||
//
|
||||
// top is the maximum number of DNS zones to return. If not specified, returns up to 100 zones.
|
||||
// Parameters:
|
||||
// top - the maximum number of DNS zones to return. If not specified, returns up to 100 zones.
|
||||
func (client ZonesClient) List(ctx context.Context, top *int32) (result ZoneListResultPage, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/ZonesClient.List")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.zlr.Response.Response != nil {
|
||||
sc = result.zlr.Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
if err := validation.Validate([]validation.Validation{
|
||||
{TargetValue: client.SubscriptionID,
|
||||
Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
|
||||
return result, validation.NewError("dns.ZonesClient", "List", err.Error())
|
||||
}
|
||||
|
||||
result.fn = client.listNextResults
|
||||
req, err := client.ListPreparer(ctx, top)
|
||||
if err != nil {
|
||||
@@ -329,8 +408,8 @@ func (client ZonesClient) ListResponder(resp *http.Response) (result ZoneListRes
|
||||
}
|
||||
|
||||
// listNextResults retrieves the next set of results, if any.
|
||||
func (client ZonesClient) listNextResults(lastResults ZoneListResult) (result ZoneListResult, err error) {
|
||||
req, err := lastResults.zoneListResultPreparer()
|
||||
func (client ZonesClient) listNextResults(ctx context.Context, lastResults ZoneListResult) (result ZoneListResult, err error) {
|
||||
req, err := lastResults.zoneListResultPreparer(ctx)
|
||||
if err != nil {
|
||||
return result, autorest.NewErrorWithError(err, "dns.ZonesClient", "listNextResults", nil, "Failure preparing next results request")
|
||||
}
|
||||
@@ -351,15 +430,45 @@ func (client ZonesClient) listNextResults(lastResults ZoneListResult) (result Zo
|
||||
|
||||
// ListComplete enumerates all values, automatically crossing page boundaries as required.
|
||||
func (client ZonesClient) ListComplete(ctx context.Context, top *int32) (result ZoneListResultIterator, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/ZonesClient.List")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.Response().Response.Response != nil {
|
||||
sc = result.page.Response().Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
result.page, err = client.List(ctx, top)
|
||||
return
|
||||
}
|
||||
|
||||
// ListByResourceGroup lists the DNS zones within a resource group.
|
||||
//
|
||||
// resourceGroupName is the name of the resource group. top is the maximum number of record sets to return. If not
|
||||
// specified, returns up to 100 record sets.
|
||||
// Parameters:
|
||||
// resourceGroupName - the name of the resource group. The name is case insensitive.
|
||||
// top - the maximum number of record sets to return. If not specified, returns up to 100 record sets.
|
||||
func (client ZonesClient) ListByResourceGroup(ctx context.Context, resourceGroupName string, top *int32) (result ZoneListResultPage, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/ZonesClient.ListByResourceGroup")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.zlr.Response.Response != nil {
|
||||
sc = result.zlr.Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
if err := validation.Validate([]validation.Validation{
|
||||
{TargetValue: resourceGroupName,
|
||||
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
|
||||
{TargetValue: client.SubscriptionID,
|
||||
Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
|
||||
return result, validation.NewError("dns.ZonesClient", "ListByResourceGroup", err.Error())
|
||||
}
|
||||
|
||||
result.fn = client.listByResourceGroupNextResults
|
||||
req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName, top)
|
||||
if err != nil {
|
||||
@@ -426,8 +535,8 @@ func (client ZonesClient) ListByResourceGroupResponder(resp *http.Response) (res
|
||||
}
|
||||
|
||||
// listByResourceGroupNextResults retrieves the next set of results, if any.
|
||||
func (client ZonesClient) listByResourceGroupNextResults(lastResults ZoneListResult) (result ZoneListResult, err error) {
|
||||
req, err := lastResults.zoneListResultPreparer()
|
||||
func (client ZonesClient) listByResourceGroupNextResults(ctx context.Context, lastResults ZoneListResult) (result ZoneListResult, err error) {
|
||||
req, err := lastResults.zoneListResultPreparer(ctx)
|
||||
if err != nil {
|
||||
return result, autorest.NewErrorWithError(err, "dns.ZonesClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request")
|
||||
}
|
||||
@@ -448,6 +557,16 @@ func (client ZonesClient) listByResourceGroupNextResults(lastResults ZoneListRes
|
||||
|
||||
// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required.
|
||||
func (client ZonesClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string, top *int32) (result ZoneListResultIterator, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/ZonesClient.ListByResourceGroup")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.Response().Response.Response != nil {
|
||||
sc = result.page.Response().Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
result.page, err = client.ListByResourceGroup(ctx, resourceGroupName, top)
|
||||
return
|
||||
}
|
||||
2 vendor/github.com/Azure/azure-sdk-for-go/version/version.go (generated, vendored)
@@ -18,4 +18,4 @@ package version
// Changes may cause incorrect behavior and will be lost if the code is regenerated.

// Number contains the semantic version of this SDK.
const Number = "v15.0.1"
const Number = "v31.2.0"
88 vendor/github.com/Azure/go-autorest/autorest/adal/config.go (generated, vendored)
@@ -15,21 +15,22 @@ package adal
// limitations under the License.

import (
	"errors"
	"fmt"
	"net/url"
)

const (
	activeDirectoryAPIVersion       = "1.0"
	activeDirectoryEndpointTemplate = "%s/oauth2/%s%s"
)

// OAuthConfig represents the endpoints needed
// in OAuth operations
type OAuthConfig struct {
	AuthorityEndpoint  url.URL
	AuthorizeEndpoint  url.URL
	TokenEndpoint      url.URL
	DeviceCodeEndpoint url.URL
	AuthorityEndpoint  url.URL `json:"authorityEndpoint"`
	AuthorizeEndpoint  url.URL `json:"authorizeEndpoint"`
	TokenEndpoint      url.URL `json:"tokenEndpoint"`
	DeviceCodeEndpoint url.URL `json:"deviceCodeEndpoint"`
}

// IsZero returns true if the OAuthConfig object is zero-initialized.
@@ -46,11 +47,24 @@ func validateStringParam(param, name string) error {

// NewOAuthConfig returns an OAuthConfig with tenant specific urls
func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) {
	apiVer := "1.0"
	return NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID, &apiVer)
}

// NewOAuthConfigWithAPIVersion returns an OAuthConfig with tenant specific urls.
// If apiVersion is not nil the "api-version" query parameter will be appended to the endpoint URLs with the specified value.
func NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID string, apiVersion *string) (*OAuthConfig, error) {
	if err := validateStringParam(activeDirectoryEndpoint, "activeDirectoryEndpoint"); err != nil {
		return nil, err
	}
	api := ""
	// it's legal for tenantID to be empty so don't validate it
	const activeDirectoryEndpointTemplate = "%s/oauth2/%s?api-version=%s"
	if apiVersion != nil {
		if err := validateStringParam(*apiVersion, "apiVersion"); err != nil {
			return nil, err
		}
		api = fmt.Sprintf("?api-version=%s", *apiVersion)
	}
	u, err := url.Parse(activeDirectoryEndpoint)
	if err != nil {
		return nil, err
@@ -59,15 +73,15 @@ func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, err
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", activeDirectoryAPIVersion))
|
||||
authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", api))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", activeDirectoryAPIVersion))
|
||||
tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", api))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", activeDirectoryAPIVersion))
|
||||
deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", api))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -79,3 +93,59 @@ func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, err
|
||||
DeviceCodeEndpoint: *deviceCodeURL,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// MultiTenantOAuthConfig provides endpoints for primary and auxiliary tenant IDs.
|
||||
type MultiTenantOAuthConfig interface {
|
||||
PrimaryTenant() *OAuthConfig
|
||||
AuxiliaryTenants() []*OAuthConfig
|
||||
}
|
||||
|
||||
// OAuthOptions contains optional OAuthConfig creation arguments.
|
||||
type OAuthOptions struct {
|
||||
APIVersion string
|
||||
}
|
||||
|
||||
func (c OAuthOptions) apiVersion() string {
|
||||
if c.APIVersion != "" {
|
||||
return fmt.Sprintf("?api-version=%s", c.APIVersion)
|
||||
}
|
||||
return "1.0"
|
||||
}
|
||||
|
||||
// NewMultiTenantOAuthConfig creates an object that support multitenant OAuth configuration.
|
||||
// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/authenticate-multi-tenant for more information.
|
||||
func NewMultiTenantOAuthConfig(activeDirectoryEndpoint, primaryTenantID string, auxiliaryTenantIDs []string, options OAuthOptions) (MultiTenantOAuthConfig, error) {
|
||||
if len(auxiliaryTenantIDs) == 0 || len(auxiliaryTenantIDs) > 3 {
|
||||
return nil, errors.New("must specify one to three auxiliary tenants")
|
||||
}
|
||||
mtCfg := multiTenantOAuthConfig{
|
||||
cfgs: make([]*OAuthConfig, len(auxiliaryTenantIDs)+1),
|
||||
}
|
||||
apiVer := options.apiVersion()
|
||||
pri, err := NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, primaryTenantID, &apiVer)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create OAuthConfig for primary tenant: %v", err)
|
||||
}
|
||||
mtCfg.cfgs[0] = pri
|
||||
for i := range auxiliaryTenantIDs {
|
||||
aux, err := NewOAuthConfig(activeDirectoryEndpoint, auxiliaryTenantIDs[i])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create OAuthConfig for tenant '%s': %v", auxiliaryTenantIDs[i], err)
|
||||
}
|
||||
mtCfg.cfgs[i+1] = aux
|
||||
}
|
||||
return mtCfg, nil
|
||||
}
|
||||
|
||||
type multiTenantOAuthConfig struct {
|
||||
// first config in the slice is the primary tenant
|
||||
cfgs []*OAuthConfig
|
||||
}
|
||||
|
||||
func (m multiTenantOAuthConfig) PrimaryTenant() *OAuthConfig {
|
||||
return m.cfgs[0]
|
||||
}
|
||||
|
||||
func (m multiTenantOAuthConfig) AuxiliaryTenants() []*OAuthConfig {
|
||||
return m.cfgs[1:]
|
||||
}
|
||||
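To show how the two constructors above fit together, a small hedged sketch; the authority URL, tenant IDs and helper name are placeholders, not values from this change.

func buildOAuthConfigs() error {
	const authority = "https://login.microsoftonline.com" // placeholder endpoint

	// Passing nil omits the api-version query parameter from the generated endpoints.
	cfg, err := adal.NewOAuthConfigWithAPIVersion(authority, "primary-tenant-id", nil)
	if err != nil {
		return err
	}
	fmt.Println(cfg.TokenEndpoint.String())

	// One primary tenant plus up to three auxiliary tenants for cross-tenant calls.
	mt, err := adal.NewMultiTenantOAuthConfig(authority, "primary-tenant-id",
		[]string{"aux-tenant-id"}, adal.OAuthOptions{})
	if err != nil {
		return err
	}
	fmt.Println(mt.PrimaryTenant().AuthorityEndpoint.String())
	return nil
}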
2 vendor/github.com/Azure/go-autorest/autorest/adal/sender.go (generated, vendored)
@@ -38,7 +38,7 @@ func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) {
	return sf(r)
}

// SendDecorator takes and possibily decorates, by wrapping, a Sender. Decorators may affect the
// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the
// http.Request and pass it along or, first, pass the http.Request along then react to the
// http.Response result.
type SendDecorator func(Sender) Sender
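For orientation, a hedged sketch of a custom SendDecorator built from adal.SenderFunc: it wraps whatever Sender it is given and logs each request before delegating; the decorator name is illustrative.

// loggingDecorator satisfies adal.SendDecorator (func(Sender) Sender).
func loggingDecorator(next adal.Sender) adal.Sender {
	return adal.SenderFunc(func(r *http.Request) (*http.Response, error) {
		log.Printf("adal: %s %s", r.Method, r.URL)
		return next.Do(r)
	})
}

Decorators of this shape can then be composed around the token sender, for example via adal.DecorateSender (assumed to be available alongside SenderFunc in this package).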
577 vendor/github.com/Azure/go-autorest/autorest/adal/token.go (generated, vendored)
@@ -15,22 +15,26 @@ package adal
// limitations under the License.

import (
	"context"
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha1"
	"crypto/x509"
	"encoding/base64"
	"encoding/json"
	"errors"
	"fmt"
	"io/ioutil"
	"math"
	"net"
	"net/http"
	"net/url"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/Azure/go-autorest/autorest/date"
	"github.com/Azure/go-autorest/tracing"
	"github.com/dgrijalva/jwt-go"
)

@@ -57,6 +61,9 @@ const (
|
||||
|
||||
// msiEndpoint is the well known endpoint for getting MSI authentications tokens
|
||||
msiEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token"
|
||||
|
||||
// the default number of attempts to refresh an MSI authentication token
|
||||
defaultMaxMSIRefreshAttempts = 5
|
||||
)
|
||||
|
||||
// OAuthTokenProvider is an interface which should be implemented by an access token retriever
|
||||
@@ -64,6 +71,12 @@ type OAuthTokenProvider interface {
|
||||
OAuthToken() string
|
||||
}
|
||||
|
||||
// MultitenantOAuthTokenProvider provides tokens used for multi-tenant authorization.
|
||||
type MultitenantOAuthTokenProvider interface {
|
||||
PrimaryOAuthToken() string
|
||||
AuxiliaryOAuthTokens() []string
|
||||
}
|
||||
|
||||
// TokenRefreshError is an interface used by errors returned during token refresh.
|
||||
type TokenRefreshError interface {
|
||||
error
|
||||
@@ -77,23 +90,39 @@ type Refresher interface {
|
||||
EnsureFresh() error
|
||||
}
|
||||
|
||||
// RefresherWithContext is an interface for token refresh functionality
|
||||
type RefresherWithContext interface {
|
||||
RefreshWithContext(ctx context.Context) error
|
||||
RefreshExchangeWithContext(ctx context.Context, resource string) error
|
||||
EnsureFreshWithContext(ctx context.Context) error
|
||||
}
|
||||
|
||||
// TokenRefreshCallback is the type representing callbacks that will be called after
|
||||
// a successful token refresh
|
||||
type TokenRefreshCallback func(Token) error
|
||||
|
||||
// Token encapsulates the access token used to authorize Azure requests.
|
||||
// https://docs.microsoft.com/en-us/azure/active-directory/develop/v1-oauth2-client-creds-grant-flow#service-to-service-access-token-response
|
||||
type Token struct {
|
||||
AccessToken string `json:"access_token"`
|
||||
RefreshToken string `json:"refresh_token"`
|
||||
|
||||
ExpiresIn string `json:"expires_in"`
|
||||
ExpiresOn string `json:"expires_on"`
|
||||
NotBefore string `json:"not_before"`
|
||||
ExpiresIn json.Number `json:"expires_in"`
|
||||
ExpiresOn json.Number `json:"expires_on"`
|
||||
NotBefore json.Number `json:"not_before"`
|
||||
|
||||
Resource string `json:"resource"`
|
||||
Type string `json:"token_type"`
|
||||
}
|
||||
|
||||
func newToken() Token {
|
||||
return Token{
|
||||
ExpiresIn: "0",
|
||||
ExpiresOn: "0",
|
||||
NotBefore: "0",
|
||||
}
|
||||
}
|
||||
|
||||
// IsZero returns true if the token object is zero-initialized.
|
||||
func (t Token) IsZero() bool {
|
||||
return t == Token{}
|
||||
@@ -101,12 +130,12 @@ func (t Token) IsZero() bool {
|
||||
|
||||
// Expires returns the time.Time when the Token expires.
|
||||
func (t Token) Expires() time.Time {
|
||||
s, err := strconv.Atoi(t.ExpiresOn)
|
||||
s, err := t.ExpiresOn.Float64()
|
||||
if err != nil {
|
||||
s = -3600
|
||||
}
|
||||
|
||||
expiration := date.NewUnixTimeFromSeconds(float64(s))
|
||||
expiration := date.NewUnixTimeFromSeconds(s)
|
||||
|
||||
return time.Time(expiration).UTC()
|
||||
}
|
||||
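A short sketch of why the switch to json.Number matters: a token payload whose expires_on arrives as a bare JSON number now unmarshals, and Expires() still yields a time.Time. The payload values and helper name are made up for illustration.

func tokenExpiryDemo() error {
	// With the old string-typed fields, a numeric expires_on like this failed to unmarshal.
	payload := []byte(`{"access_token":"abc","expires_in":3600,"expires_on":1565000000,"token_type":"Bearer"}`)

	var tok adal.Token
	if err := json.Unmarshal(payload, &tok); err != nil {
		return err
	}
	fmt.Println("expires at:", tok.Expires().Format(time.RFC3339))
	return nil
}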
@@ -127,6 +156,12 @@ func (t *Token) OAuthToken() string {
|
||||
return t.AccessToken
|
||||
}
|
||||
|
||||
// ServicePrincipalSecret is an interface that allows various secret mechanism to fill the form
|
||||
// that is submitted when acquiring an oAuth token.
|
||||
type ServicePrincipalSecret interface {
|
||||
SetAuthenticationValues(spt *ServicePrincipalToken, values *url.Values) error
|
||||
}
|
||||
|
||||
// ServicePrincipalNoSecret represents a secret type that contains no secret
|
||||
// meaning it is not valid for fetching a fresh token. This is used by Manual
|
||||
type ServicePrincipalNoSecret struct {
|
||||
@@ -138,15 +173,19 @@ func (noSecret *ServicePrincipalNoSecret) SetAuthenticationValues(spt *ServicePr
|
||||
return fmt.Errorf("Manually created ServicePrincipalToken does not contain secret material to retrieve a new access token")
|
||||
}
|
||||
|
||||
// ServicePrincipalSecret is an interface that allows various secret mechanism to fill the form
|
||||
// that is submitted when acquiring an oAuth token.
|
||||
type ServicePrincipalSecret interface {
|
||||
SetAuthenticationValues(spt *ServicePrincipalToken, values *url.Values) error
|
||||
// MarshalJSON implements the json.Marshaler interface.
|
||||
func (noSecret ServicePrincipalNoSecret) MarshalJSON() ([]byte, error) {
|
||||
type tokenType struct {
|
||||
Type string `json:"type"`
|
||||
}
|
||||
return json.Marshal(tokenType{
|
||||
Type: "ServicePrincipalNoSecret",
|
||||
})
|
||||
}
|
||||
|
||||
// ServicePrincipalTokenSecret implements ServicePrincipalSecret for client_secret type authorization.
|
||||
type ServicePrincipalTokenSecret struct {
|
||||
ClientSecret string
|
||||
ClientSecret string `json:"value"`
|
||||
}
|
||||
|
||||
// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
|
||||
@@ -156,49 +195,24 @@ func (tokenSecret *ServicePrincipalTokenSecret) SetAuthenticationValues(spt *Ser
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalJSON implements the json.Marshaler interface.
|
||||
func (tokenSecret ServicePrincipalTokenSecret) MarshalJSON() ([]byte, error) {
|
||||
type tokenType struct {
|
||||
Type string `json:"type"`
|
||||
Value string `json:"value"`
|
||||
}
|
||||
return json.Marshal(tokenType{
|
||||
Type: "ServicePrincipalTokenSecret",
|
||||
Value: tokenSecret.ClientSecret,
|
||||
})
|
||||
}
|
||||
|
||||
// ServicePrincipalCertificateSecret implements ServicePrincipalSecret for generic RSA cert auth with signed JWTs.
|
||||
type ServicePrincipalCertificateSecret struct {
|
||||
Certificate *x509.Certificate
|
||||
PrivateKey *rsa.PrivateKey
|
||||
}
|
||||
|
||||
// ServicePrincipalMSISecret implements ServicePrincipalSecret for machines running the MSI Extension.
|
||||
type ServicePrincipalMSISecret struct {
|
||||
}
|
||||
|
||||
// ServicePrincipalUsernamePasswordSecret implements ServicePrincipalSecret for username and password auth.
|
||||
type ServicePrincipalUsernamePasswordSecret struct {
|
||||
Username string
|
||||
Password string
|
||||
}
|
||||
|
||||
// ServicePrincipalAuthorizationCodeSecret implements ServicePrincipalSecret for authorization code auth.
|
||||
type ServicePrincipalAuthorizationCodeSecret struct {
|
||||
ClientSecret string
|
||||
AuthorizationCode string
|
||||
RedirectURI string
|
||||
}
|
||||
|
||||
// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
|
||||
func (secret *ServicePrincipalAuthorizationCodeSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
|
||||
v.Set("code", secret.AuthorizationCode)
|
||||
v.Set("client_secret", secret.ClientSecret)
|
||||
v.Set("redirect_uri", secret.RedirectURI)
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
|
||||
func (secret *ServicePrincipalUsernamePasswordSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
|
||||
v.Set("username", secret.Username)
|
||||
v.Set("password", secret.Password)
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
|
||||
func (msiSecret *ServicePrincipalMSISecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// SignJwt returns the JWT signed with the certificate's private key.
|
||||
func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalToken) (string, error) {
|
||||
hasher := sha1.New()
|
||||
@@ -218,10 +232,12 @@ func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalTo
|
||||
|
||||
token := jwt.New(jwt.SigningMethodRS256)
|
||||
token.Header["x5t"] = thumbprint
|
||||
x5c := []string{base64.StdEncoding.EncodeToString(secret.Certificate.Raw)}
|
||||
token.Header["x5c"] = x5c
|
||||
token.Claims = jwt.MapClaims{
|
||||
"aud": spt.oauthConfig.TokenEndpoint.String(),
|
||||
"iss": spt.clientID,
|
||||
"sub": spt.clientID,
|
||||
"aud": spt.inner.OauthConfig.TokenEndpoint.String(),
|
||||
"iss": spt.inner.ClientID,
|
||||
"sub": spt.inner.ClientID,
|
||||
"jti": base64.URLEncoding.EncodeToString(jti),
|
||||
"nbf": time.Now().Unix(),
|
||||
"exp": time.Now().Add(time.Hour * 24).Unix(),
|
||||
@@ -244,19 +260,156 @@ func (secret *ServicePrincipalCertificateSecret) SetAuthenticationValues(spt *Se
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalJSON implements the json.Marshaler interface.
|
||||
func (secret ServicePrincipalCertificateSecret) MarshalJSON() ([]byte, error) {
|
||||
return nil, errors.New("marshalling ServicePrincipalCertificateSecret is not supported")
|
||||
}
|
||||
|
||||
// ServicePrincipalMSISecret implements ServicePrincipalSecret for machines running the MSI Extension.
|
||||
type ServicePrincipalMSISecret struct {
|
||||
}
|
||||
|
||||
// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
|
||||
func (msiSecret *ServicePrincipalMSISecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalJSON implements the json.Marshaler interface.
|
||||
func (msiSecret ServicePrincipalMSISecret) MarshalJSON() ([]byte, error) {
|
||||
return nil, errors.New("marshalling ServicePrincipalMSISecret is not supported")
|
||||
}
|
||||
|
||||
// ServicePrincipalUsernamePasswordSecret implements ServicePrincipalSecret for username and password auth.
|
||||
type ServicePrincipalUsernamePasswordSecret struct {
|
||||
Username string `json:"username"`
|
||||
Password string `json:"password"`
|
||||
}
|
||||
|
||||
// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
|
||||
func (secret *ServicePrincipalUsernamePasswordSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
|
||||
v.Set("username", secret.Username)
|
||||
v.Set("password", secret.Password)
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalJSON implements the json.Marshaler interface.
|
||||
func (secret ServicePrincipalUsernamePasswordSecret) MarshalJSON() ([]byte, error) {
|
||||
type tokenType struct {
|
||||
Type string `json:"type"`
|
||||
Username string `json:"username"`
|
||||
Password string `json:"password"`
|
||||
}
|
||||
return json.Marshal(tokenType{
|
||||
Type: "ServicePrincipalUsernamePasswordSecret",
|
||||
Username: secret.Username,
|
||||
Password: secret.Password,
|
||||
})
|
||||
}
|
||||
|
||||
// ServicePrincipalAuthorizationCodeSecret implements ServicePrincipalSecret for authorization code auth.
|
||||
type ServicePrincipalAuthorizationCodeSecret struct {
|
||||
ClientSecret string `json:"value"`
|
||||
AuthorizationCode string `json:"authCode"`
|
||||
RedirectURI string `json:"redirect"`
|
||||
}
|
||||
|
||||
// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
|
||||
func (secret *ServicePrincipalAuthorizationCodeSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
|
||||
v.Set("code", secret.AuthorizationCode)
|
||||
v.Set("client_secret", secret.ClientSecret)
|
||||
v.Set("redirect_uri", secret.RedirectURI)
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalJSON implements the json.Marshaler interface.
|
||||
func (secret ServicePrincipalAuthorizationCodeSecret) MarshalJSON() ([]byte, error) {
|
||||
type tokenType struct {
|
||||
Type string `json:"type"`
|
||||
Value string `json:"value"`
|
||||
AuthCode string `json:"authCode"`
|
||||
Redirect string `json:"redirect"`
|
||||
}
|
||||
return json.Marshal(tokenType{
|
||||
Type: "ServicePrincipalAuthorizationCodeSecret",
|
||||
Value: secret.ClientSecret,
|
||||
AuthCode: secret.AuthorizationCode,
|
||||
Redirect: secret.RedirectURI,
|
||||
})
|
||||
}
|
||||
|
||||
// ServicePrincipalToken encapsulates a Token created for a Service Principal.
|
||||
type ServicePrincipalToken struct {
|
||||
token Token
|
||||
secret ServicePrincipalSecret
|
||||
oauthConfig OAuthConfig
|
||||
clientID string
|
||||
resource string
|
||||
autoRefresh bool
|
||||
refreshLock *sync.RWMutex
|
||||
refreshWithin time.Duration
|
||||
sender Sender
|
||||
|
||||
inner servicePrincipalToken
|
||||
refreshLock *sync.RWMutex
|
||||
sender Sender
|
||||
refreshCallbacks []TokenRefreshCallback
|
||||
// MaxMSIRefreshAttempts is the maximum number of attempts to refresh an MSI token.
|
||||
MaxMSIRefreshAttempts int
|
||||
}
|
||||
|
||||
// MarshalTokenJSON returns the marshalled inner token.
|
||||
func (spt ServicePrincipalToken) MarshalTokenJSON() ([]byte, error) {
|
||||
return json.Marshal(spt.inner.Token)
|
||||
}
|
||||
|
||||
// SetRefreshCallbacks replaces any existing refresh callbacks with the specified callbacks.
|
||||
func (spt *ServicePrincipalToken) SetRefreshCallbacks(callbacks []TokenRefreshCallback) {
|
||||
spt.refreshCallbacks = callbacks
|
||||
}
|
||||
|
||||
// MarshalJSON implements the json.Marshaler interface.
|
||||
func (spt ServicePrincipalToken) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(spt.inner)
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements the json.Unmarshaler interface.
|
||||
func (spt *ServicePrincipalToken) UnmarshalJSON(data []byte) error {
|
||||
// need to determine the token type
|
||||
raw := map[string]interface{}{}
|
||||
err := json.Unmarshal(data, &raw)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
secret := raw["secret"].(map[string]interface{})
|
||||
switch secret["type"] {
|
||||
case "ServicePrincipalNoSecret":
|
||||
spt.inner.Secret = &ServicePrincipalNoSecret{}
|
||||
case "ServicePrincipalTokenSecret":
|
||||
spt.inner.Secret = &ServicePrincipalTokenSecret{}
|
||||
case "ServicePrincipalCertificateSecret":
|
||||
return errors.New("unmarshalling ServicePrincipalCertificateSecret is not supported")
|
||||
case "ServicePrincipalMSISecret":
|
||||
return errors.New("unmarshalling ServicePrincipalMSISecret is not supported")
|
||||
case "ServicePrincipalUsernamePasswordSecret":
|
||||
spt.inner.Secret = &ServicePrincipalUsernamePasswordSecret{}
|
||||
case "ServicePrincipalAuthorizationCodeSecret":
|
||||
spt.inner.Secret = &ServicePrincipalAuthorizationCodeSecret{}
|
||||
default:
|
||||
return fmt.Errorf("unrecognized token type '%s'", secret["type"])
|
||||
}
|
||||
err = json.Unmarshal(data, &spt.inner)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Don't override the refreshLock or the sender if those have been already set.
|
||||
if spt.refreshLock == nil {
|
||||
spt.refreshLock = &sync.RWMutex{}
|
||||
}
|
||||
if spt.sender == nil {
|
||||
spt.sender = &http.Client{Transport: tracing.Transport}
|
||||
}
|
||||
return nil
|
||||
}
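Side note (editorial, not part of the vendored diff): the MarshalJSON/UnmarshalJSON pair above lets a ServicePrincipalToken be persisted and restored, with UnmarshalJSON re-selecting the concrete secret type from its "type" field. A minimal round-trip sketch from a consumer's point of view, assuming spt holds a marshalable secret (certificate and MSI secrets deliberately refuse to marshal); the helper name is illustrative:

	// Sketch only: helper name and error handling are illustrative.
	func roundTripSPT(spt *adal.ServicePrincipalToken) (*adal.ServicePrincipalToken, error) {
		data, err := json.Marshal(spt) // serializes the inner token, secret, and OAuth config
		if err != nil {
			return nil, err
		}
		var restored adal.ServicePrincipalToken
		// UnmarshalJSON re-creates the concrete secret from its "type" field.
		if err := json.Unmarshal(data, &restored); err != nil {
			return nil, err
		}
		return &restored, nil
	}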
|
||||
|
||||
// internal type used for marshalling/unmarshalling
|
||||
type servicePrincipalToken struct {
|
||||
Token Token `json:"token"`
|
||||
Secret ServicePrincipalSecret `json:"secret"`
|
||||
OauthConfig OAuthConfig `json:"oauth"`
|
||||
ClientID string `json:"clientID"`
|
||||
Resource string `json:"resource"`
|
||||
AutoRefresh bool `json:"autoRefresh"`
|
||||
RefreshWithin time.Duration `json:"refreshWithin"`
|
||||
}
|
||||
|
||||
func validateOAuthConfig(oac OAuthConfig) error {
|
||||
@@ -281,14 +434,17 @@ func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, reso
|
||||
return nil, fmt.Errorf("parameter 'secret' cannot be nil")
|
||||
}
|
||||
spt := &ServicePrincipalToken{
|
||||
oauthConfig: oauthConfig,
|
||||
secret: secret,
|
||||
clientID: id,
|
||||
resource: resource,
|
||||
autoRefresh: true,
|
||||
inner: servicePrincipalToken{
|
||||
Token: newToken(),
|
||||
OauthConfig: oauthConfig,
|
||||
Secret: secret,
|
||||
ClientID: id,
|
||||
Resource: resource,
|
||||
AutoRefresh: true,
|
||||
RefreshWithin: defaultRefresh,
|
||||
},
|
||||
refreshLock: &sync.RWMutex{},
|
||||
refreshWithin: defaultRefresh,
|
||||
sender: &http.Client{},
|
||||
sender: &http.Client{Transport: tracing.Transport},
|
||||
refreshCallbacks: callbacks,
|
||||
}
|
||||
return spt, nil
|
||||
@@ -318,7 +474,39 @@ func NewServicePrincipalTokenFromManualToken(oauthConfig OAuthConfig, clientID s
|
||||
return nil, err
|
||||
}
|
||||
|
||||
spt.token = token
|
||||
spt.inner.Token = token
|
||||
|
||||
return spt, nil
|
||||
}
|
||||
|
||||
// NewServicePrincipalTokenFromManualTokenSecret creates a ServicePrincipalToken using the supplied token and secret
|
||||
func NewServicePrincipalTokenFromManualTokenSecret(oauthConfig OAuthConfig, clientID string, resource string, token Token, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
|
||||
if err := validateOAuthConfig(oauthConfig); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := validateStringParam(clientID, "clientID"); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := validateStringParam(resource, "resource"); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if secret == nil {
|
||||
return nil, fmt.Errorf("parameter 'secret' cannot be nil")
|
||||
}
|
||||
if token.IsZero() {
|
||||
return nil, fmt.Errorf("parameter 'token' cannot be zero-initialized")
|
||||
}
|
||||
spt, err := NewServicePrincipalTokenWithSecret(
|
||||
oauthConfig,
|
||||
clientID,
|
||||
resource,
|
||||
secret,
|
||||
callbacks...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
spt.inner.Token = token
|
||||
|
||||
return spt, nil
|
||||
}
|
||||
@@ -486,20 +674,24 @@ func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedI
|
||||
msiEndpointURL.RawQuery = v.Encode()
|
||||
|
||||
spt := &ServicePrincipalToken{
|
||||
oauthConfig: OAuthConfig{
|
||||
TokenEndpoint: *msiEndpointURL,
|
||||
inner: servicePrincipalToken{
|
||||
Token: newToken(),
|
||||
OauthConfig: OAuthConfig{
|
||||
TokenEndpoint: *msiEndpointURL,
|
||||
},
|
||||
Secret: &ServicePrincipalMSISecret{},
|
||||
Resource: resource,
|
||||
AutoRefresh: true,
|
||||
RefreshWithin: defaultRefresh,
|
||||
},
|
||||
secret: &ServicePrincipalMSISecret{},
|
||||
resource: resource,
|
||||
autoRefresh: true,
|
||||
refreshLock: &sync.RWMutex{},
|
||||
refreshWithin: defaultRefresh,
|
||||
sender: &http.Client{},
|
||||
refreshCallbacks: callbacks,
|
||||
refreshLock: &sync.RWMutex{},
|
||||
sender: &http.Client{Transport: tracing.Transport},
|
||||
refreshCallbacks: callbacks,
|
||||
MaxMSIRefreshAttempts: defaultMaxMSIRefreshAttempts,
|
||||
}
|
||||
|
||||
if userAssignedID != nil {
|
||||
spt.clientID = *userAssignedID
|
||||
spt.inner.ClientID = *userAssignedID
|
||||
}
|
||||
|
||||
return spt, nil
|
||||
@@ -528,12 +720,18 @@ func newTokenRefreshError(message string, resp *http.Response) TokenRefreshError
|
||||
// EnsureFresh will refresh the token if it will expire within the refresh window (as set by
|
||||
// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use.
|
||||
func (spt *ServicePrincipalToken) EnsureFresh() error {
|
||||
if spt.autoRefresh && spt.token.WillExpireIn(spt.refreshWithin) {
|
||||
return spt.EnsureFreshWithContext(context.Background())
|
||||
}
|
||||
|
||||
// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by
|
||||
// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use.
|
||||
func (spt *ServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error {
|
||||
if spt.inner.AutoRefresh && spt.inner.Token.WillExpireIn(spt.inner.RefreshWithin) {
|
||||
// take the write lock then check to see if the token was already refreshed
|
||||
spt.refreshLock.Lock()
|
||||
defer spt.refreshLock.Unlock()
|
||||
if spt.token.WillExpireIn(spt.refreshWithin) {
|
||||
return spt.refreshInternal(spt.resource)
|
||||
if spt.inner.Token.WillExpireIn(spt.inner.RefreshWithin) {
|
||||
return spt.refreshInternal(ctx, spt.inner.Resource)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
@@ -543,7 +741,7 @@ func (spt *ServicePrincipalToken) EnsureFresh() error {
|
||||
func (spt *ServicePrincipalToken) InvokeRefreshCallbacks(token Token) error {
|
||||
if spt.refreshCallbacks != nil {
|
||||
for _, callback := range spt.refreshCallbacks {
|
||||
err := callback(spt.token)
|
||||
err := callback(spt.inner.Token)
|
||||
if err != nil {
|
||||
return fmt.Errorf("adal: TokenRefreshCallback handler failed. Error = '%v'", err)
|
||||
}
|
||||
@@ -555,21 +753,33 @@ func (spt *ServicePrincipalToken) InvokeRefreshCallbacks(token Token) error {
|
||||
// Refresh obtains a fresh token for the Service Principal.
|
||||
// This method is not safe for concurrent use and should be synchronized.
|
||||
func (spt *ServicePrincipalToken) Refresh() error {
|
||||
return spt.RefreshWithContext(context.Background())
|
||||
}
|
||||
|
||||
// RefreshWithContext obtains a fresh token for the Service Principal.
|
||||
// This method is not safe for concurrent use and should be synchronized.
|
||||
func (spt *ServicePrincipalToken) RefreshWithContext(ctx context.Context) error {
|
||||
spt.refreshLock.Lock()
|
||||
defer spt.refreshLock.Unlock()
|
||||
return spt.refreshInternal(spt.resource)
|
||||
return spt.refreshInternal(ctx, spt.inner.Resource)
|
||||
}
|
||||
|
||||
// RefreshExchange refreshes the token, but for a different resource.
|
||||
// This method is not safe for concurrent use and should be synchronized.
|
||||
func (spt *ServicePrincipalToken) RefreshExchange(resource string) error {
|
||||
return spt.RefreshExchangeWithContext(context.Background(), resource)
|
||||
}
|
||||
|
||||
// RefreshExchangeWithContext refreshes the token, but for a different resource.
|
||||
// This method is not safe for concurrent use and should be synchronized.
|
||||
func (spt *ServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error {
|
||||
spt.refreshLock.Lock()
|
||||
defer spt.refreshLock.Unlock()
|
||||
return spt.refreshInternal(resource)
|
||||
return spt.refreshInternal(ctx, resource)
|
||||
}
|
||||
|
||||
func (spt *ServicePrincipalToken) getGrantType() string {
|
||||
switch spt.secret.(type) {
|
||||
switch spt.inner.Secret.(type) {
|
||||
case *ServicePrincipalUsernamePasswordSecret:
|
||||
return OAuthGrantTypeUserPass
|
||||
case *ServicePrincipalAuthorizationCodeSecret:
|
||||
@@ -587,23 +797,32 @@ func isIMDS(u url.URL) bool {
|
||||
return u.Host == imds.Host && u.Path == imds.Path
|
||||
}
|
||||
|
||||
func (spt *ServicePrincipalToken) refreshInternal(resource string) error {
|
||||
req, err := http.NewRequest(http.MethodPost, spt.oauthConfig.TokenEndpoint.String(), nil)
|
||||
func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource string) error {
|
||||
req, err := http.NewRequest(http.MethodPost, spt.inner.OauthConfig.TokenEndpoint.String(), nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("adal: Failed to build the refresh request. Error = '%v'", err)
|
||||
}
|
||||
|
||||
if !isIMDS(spt.oauthConfig.TokenEndpoint) {
|
||||
req.Header.Add("User-Agent", UserAgent())
|
||||
req = req.WithContext(ctx)
|
||||
if !isIMDS(spt.inner.OauthConfig.TokenEndpoint) {
|
||||
v := url.Values{}
|
||||
v.Set("client_id", spt.clientID)
|
||||
v.Set("client_id", spt.inner.ClientID)
|
||||
v.Set("resource", resource)
|
||||
|
||||
if spt.token.RefreshToken != "" {
|
||||
if spt.inner.Token.RefreshToken != "" {
|
||||
v.Set("grant_type", OAuthGrantTypeRefreshToken)
|
||||
v.Set("refresh_token", spt.token.RefreshToken)
|
||||
v.Set("refresh_token", spt.inner.Token.RefreshToken)
|
||||
// web apps must specify client_secret when refreshing tokens
|
||||
// see https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-protocols-oauth-code#refreshing-the-access-tokens
|
||||
if spt.getGrantType() == OAuthGrantTypeAuthorizationCode {
|
||||
err := spt.inner.Secret.SetAuthenticationValues(spt, &v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
v.Set("grant_type", spt.getGrantType())
|
||||
err := spt.secret.SetAuthenticationValues(spt, &v)
|
||||
err := spt.inner.Secret.SetAuthenticationValues(spt, &v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -616,14 +835,19 @@ func (spt *ServicePrincipalToken) refreshInternal(resource string) error {
|
||||
req.Body = body
|
||||
}
|
||||
|
||||
if _, ok := spt.secret.(*ServicePrincipalMSISecret); ok {
|
||||
if _, ok := spt.inner.Secret.(*ServicePrincipalMSISecret); ok {
|
||||
req.Method = http.MethodGet
|
||||
req.Header.Set(metadataHeader, "true")
|
||||
}
|
||||
|
||||
resp, err := spt.sender.Do(req)
|
||||
var resp *http.Response
|
||||
if isIMDS(spt.inner.OauthConfig.TokenEndpoint) {
|
||||
resp, err = retryForIMDS(spt.sender, req, spt.MaxMSIRefreshAttempts)
|
||||
} else {
|
||||
resp, err = spt.sender.Do(req)
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("adal: Failed to execute the refresh request. Error = '%v'", err)
|
||||
return newTokenRefreshError(fmt.Sprintf("adal: Failed to execute the refresh request. Error = '%v'", err), nil)
|
||||
}
|
||||
|
||||
defer resp.Body.Close()
|
||||
@@ -631,11 +855,15 @@ func (spt *ServicePrincipalToken) refreshInternal(resource string) error {
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
if err != nil {
|
||||
return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Failed reading response body", resp.StatusCode), resp)
|
||||
return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Failed reading response body: %v", resp.StatusCode, err), resp)
|
||||
}
|
||||
return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Response body: %s", resp.StatusCode, string(rb)), resp)
|
||||
}
|
||||
|
||||
// for the following error cases don't return a TokenRefreshError. the operation succeeded
|
||||
// but some transient failure happened during deserialization. by returning a generic error
|
||||
// the retry logic will kick in (we don't retry on TokenRefreshError).
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("adal: Failed to read a new service principal token during refresh. Error = '%v'", err)
|
||||
}
|
||||
@@ -648,20 +876,99 @@ func (spt *ServicePrincipalToken) refreshInternal(resource string) error {
|
||||
return fmt.Errorf("adal: Failed to unmarshal the service principal token during refresh. Error = '%v' JSON = '%s'", err, string(rb))
|
||||
}
|
||||
|
||||
spt.token = token
|
||||
spt.inner.Token = token
|
||||
|
||||
return spt.InvokeRefreshCallbacks(token)
|
||||
}
|
||||
|
||||
// retry logic specific to retrieving a token from the IMDS endpoint
|
||||
func retryForIMDS(sender Sender, req *http.Request, maxAttempts int) (resp *http.Response, err error) {
|
||||
// copied from client.go due to circular dependency
|
||||
retries := []int{
|
||||
http.StatusRequestTimeout, // 408
|
||||
http.StatusTooManyRequests, // 429
|
||||
http.StatusInternalServerError, // 500
|
||||
http.StatusBadGateway, // 502
|
||||
http.StatusServiceUnavailable, // 503
|
||||
http.StatusGatewayTimeout, // 504
|
||||
}
|
||||
// extra retry status codes specific to IMDS
|
||||
retries = append(retries,
|
||||
http.StatusNotFound,
|
||||
http.StatusGone,
|
||||
// all remaining 5xx
|
||||
http.StatusNotImplemented,
|
||||
http.StatusHTTPVersionNotSupported,
|
||||
http.StatusVariantAlsoNegotiates,
|
||||
http.StatusInsufficientStorage,
|
||||
http.StatusLoopDetected,
|
||||
http.StatusNotExtended,
|
||||
http.StatusNetworkAuthenticationRequired)
|
||||
|
||||
// see https://docs.microsoft.com/en-us/azure/active-directory/managed-service-identity/how-to-use-vm-token#retry-guidance
|
||||
|
||||
const maxDelay time.Duration = 60 * time.Second
|
||||
|
||||
attempt := 0
|
||||
delay := time.Duration(0)
|
||||
|
||||
for attempt < maxAttempts {
|
||||
resp, err = sender.Do(req)
|
||||
// retry on temporary network errors, e.g. transient network failures.
|
||||
// if we don't receive a response then assume we can't connect to the
|
||||
// endpoint so we're likely not running on an Azure VM so don't retry.
|
||||
if (err != nil && !isTemporaryNetworkError(err)) || resp == nil || resp.StatusCode == http.StatusOK || !containsInt(retries, resp.StatusCode) {
|
||||
return
|
||||
}
|
||||
|
||||
// perform exponential backoff with a cap.
|
||||
// must increment attempt before calculating delay.
|
||||
attempt++
|
||||
// the base value of 2 is the "delta backoff" as specified in the guidance doc
|
||||
delay += (time.Duration(math.Pow(2, float64(attempt))) * time.Second)
|
||||
if delay > maxDelay {
|
||||
delay = maxDelay
|
||||
}
|
||||
|
||||
select {
|
||||
case <-time.After(delay):
|
||||
// intentionally left blank
|
||||
case <-req.Context().Done():
|
||||
err = req.Context().Err()
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
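Side note (editorial, not part of the vendored diff): the loop above accumulates the wait time (a "delta backoff" base of 2, capped at 60 seconds), so successive attempts sleep 2s, 6s, 14s, 30s and then 60s from the fifth attempt on. A small sketch of that schedule, derived directly from the code above:

	// Sketch only: reproduces the delay accumulation used by retryForIMDS above.
	func imdsBackoffSchedule(maxAttempts int) []time.Duration {
		const maxDelay = 60 * time.Second
		schedule := make([]time.Duration, 0, maxAttempts)
		delay := time.Duration(0)
		for attempt := 1; attempt <= maxAttempts; attempt++ {
			delay += time.Duration(math.Pow(2, float64(attempt))) * time.Second
			if delay > maxDelay {
				delay = maxDelay
			}
			schedule = append(schedule, delay) // e.g. 2s, 6s, 14s, 30s, 60s, 60s, ...
		}
		return schedule
	}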
|
||||
|
||||
// returns true if the specified error is a temporary network error or false if it's not.
|
||||
// if the error doesn't implement the net.Error interface the return value is true.
|
||||
func isTemporaryNetworkError(err error) bool {
|
||||
if netErr, ok := err.(net.Error); !ok || (ok && netErr.Temporary()) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// returns true if slice ints contains the value n
|
||||
func containsInt(ints []int, n int) bool {
|
||||
for _, i := range ints {
|
||||
if i == n {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// SetAutoRefresh enables or disables automatic refreshing of stale tokens.
|
||||
func (spt *ServicePrincipalToken) SetAutoRefresh(autoRefresh bool) {
|
||||
spt.autoRefresh = autoRefresh
|
||||
spt.inner.AutoRefresh = autoRefresh
|
||||
}
|
||||
|
||||
// SetRefreshWithin sets the interval within which if the token will expire, EnsureFresh will
|
||||
// refresh the token.
|
||||
func (spt *ServicePrincipalToken) SetRefreshWithin(d time.Duration) {
|
||||
spt.refreshWithin = d
|
||||
spt.inner.RefreshWithin = d
|
||||
return
|
||||
}
|
||||
|
||||
@@ -673,12 +980,76 @@ func (spt *ServicePrincipalToken) SetSender(s Sender) { spt.sender = s }
|
||||
func (spt *ServicePrincipalToken) OAuthToken() string {
|
||||
spt.refreshLock.RLock()
|
||||
defer spt.refreshLock.RUnlock()
|
||||
return spt.token.OAuthToken()
|
||||
return spt.inner.Token.OAuthToken()
|
||||
}
|
||||
|
||||
// Token returns a copy of the current token.
|
||||
func (spt *ServicePrincipalToken) Token() Token {
|
||||
spt.refreshLock.RLock()
|
||||
defer spt.refreshLock.RUnlock()
|
||||
return spt.token
|
||||
return spt.inner.Token
|
||||
}
|
||||
|
||||
// MultiTenantServicePrincipalToken contains tokens for multi-tenant authorization.
|
||||
type MultiTenantServicePrincipalToken struct {
|
||||
PrimaryToken *ServicePrincipalToken
|
||||
AuxiliaryTokens []*ServicePrincipalToken
|
||||
}
|
||||
|
||||
// PrimaryOAuthToken returns the primary authorization token.
|
||||
func (mt *MultiTenantServicePrincipalToken) PrimaryOAuthToken() string {
|
||||
return mt.PrimaryToken.OAuthToken()
|
||||
}
|
||||
|
||||
// AuxiliaryOAuthTokens returns one to three auxiliary authorization tokens.
|
||||
func (mt *MultiTenantServicePrincipalToken) AuxiliaryOAuthTokens() []string {
|
||||
tokens := make([]string, len(mt.AuxiliaryTokens))
|
||||
for i := range mt.AuxiliaryTokens {
|
||||
tokens[i] = mt.AuxiliaryTokens[i].OAuthToken()
|
||||
}
|
||||
return tokens
|
||||
}
|
||||
|
||||
// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by
|
||||
// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use.
|
||||
func (mt *MultiTenantServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error {
|
||||
if err := mt.PrimaryToken.EnsureFreshWithContext(ctx); err != nil {
|
||||
return fmt.Errorf("failed to refresh primary token: %v", err)
|
||||
}
|
||||
for _, aux := range mt.AuxiliaryTokens {
|
||||
if err := aux.EnsureFreshWithContext(ctx); err != nil {
|
||||
return fmt.Errorf("failed to refresh auxiliary token: %v", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewMultiTenantServicePrincipalToken creates a new MultiTenantServicePrincipalToken with the specified credentials and resource.
|
||||
func NewMultiTenantServicePrincipalToken(multiTenantCfg MultiTenantOAuthConfig, clientID string, secret string, resource string) (*MultiTenantServicePrincipalToken, error) {
|
||||
if err := validateStringParam(clientID, "clientID"); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := validateStringParam(secret, "secret"); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := validateStringParam(resource, "resource"); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
auxTenants := multiTenantCfg.AuxiliaryTenants()
|
||||
m := MultiTenantServicePrincipalToken{
|
||||
AuxiliaryTokens: make([]*ServicePrincipalToken, len(auxTenants)),
|
||||
}
|
||||
primary, err := NewServicePrincipalToken(*multiTenantCfg.PrimaryTenant(), clientID, secret, resource)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create SPT for primary tenant: %v", err)
|
||||
}
|
||||
m.PrimaryToken = primary
|
||||
for i := range auxTenants {
|
||||
aux, err := NewServicePrincipalToken(*auxTenants[i], clientID, secret, resource)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create SPT for auxiliary tenant: %v", err)
|
||||
}
|
||||
m.AuxiliaryTokens[i] = aux
|
||||
}
|
||||
return &m, nil
|
||||
}
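Side note (editorial, not part of the vendored diff): a hedged usage sketch of the multi-tenant flow above; the endpoint, tenant IDs, client credentials and resource are placeholders.

	// Sketch only: all identifiers below are placeholders.
	cfg, err := adal.NewMultiTenantOAuthConfig(
		"https://login.microsoftonline.com/", "primary-tenant-id",
		[]string{"aux-tenant-1", "aux-tenant-2"}, adal.OAuthOptions{})
	if err != nil {
		log.Fatal(err)
	}
	mt, err := adal.NewMultiTenantServicePrincipalToken(cfg, "client-id", "client-secret", "https://management.azure.com/")
	if err != nil {
		log.Fatal(err)
	}
	if err := mt.EnsureFreshWithContext(context.Background()); err != nil {
		log.Fatal(err)
	}
	primary := mt.PrimaryOAuthToken()      // used for the Authorization header
	auxiliary := mt.AuxiliaryOAuthTokens() // used for the auxiliary authorization header
	_, _ = primary, auxiliary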
45 vendor/github.com/Azure/go-autorest/autorest/adal/version.go generated vendored Normal file
@@ -0,0 +1,45 @@
package adal

import (
	"fmt"
	"runtime"
)

// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

const number = "v1.0.0"

var (
	ua = fmt.Sprintf("Go/%s (%s-%s) go-autorest/adal/%s",
		runtime.Version(),
		runtime.GOARCH,
		runtime.GOOS,
		number,
	)
)

// UserAgent returns a string containing the Go version, system architecture and OS, and the adal version.
func UserAgent() string {
	return ua
}

// AddToUserAgent adds an extension to the current user agent
func AddToUserAgent(extension string) error {
	if extension != "" {
		ua = fmt.Sprintf("%s %s", ua, extension)
		return nil
	}
	return fmt.Errorf("Extension was empty, User Agent remained as '%s'", ua)
}

103 vendor/github.com/Azure/go-autorest/autorest/authorization.go generated vendored
@@ -15,12 +15,14 @@ package autorest
|
||||
// limitations under the License.
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/Azure/go-autorest/autorest/adal"
|
||||
"github.com/Azure/go-autorest/tracing"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -30,6 +32,8 @@ const (
|
||||
apiKeyAuthorizerHeader = "Ocp-Apim-Subscription-Key"
|
||||
bingAPISdkHeader = "X-BingApis-SDK-Client"
|
||||
golangBingAPISdkHeaderValue = "Go-SDK"
|
||||
authorization = "Authorization"
|
||||
basic = "Basic"
|
||||
)
|
||||
|
||||
// Authorizer is the interface that provides a PrepareDecorator used to supply request
|
||||
@@ -68,7 +72,7 @@ func NewAPIKeyAuthorizer(headers map[string]interface{}, queryParameters map[str
|
||||
return &APIKeyAuthorizer{headers: headers, queryParameters: queryParameters}
|
||||
}
|
||||
|
||||
// WithAuthorization returns a PrepareDecorator that adds an HTTP headers and Query Paramaters
|
||||
// WithAuthorization returns a PrepareDecorator that adds an HTTP headers and Query Parameters.
|
||||
func (aka *APIKeyAuthorizer) WithAuthorization() PrepareDecorator {
|
||||
return func(p Preparer) Preparer {
|
||||
return DecoratePreparer(p, WithHeaders(aka.headers), WithQueryParameters(aka.queryParameters))
|
||||
@@ -113,17 +117,19 @@ func (ba *BearerAuthorizer) WithAuthorization() PrepareDecorator {
|
||||
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
|
||||
r, err := p.Prepare(r)
|
||||
if err == nil {
|
||||
refresher, ok := ba.tokenProvider.(adal.Refresher)
|
||||
if ok {
|
||||
err := refresher.EnsureFresh()
|
||||
if err != nil {
|
||||
var resp *http.Response
|
||||
if tokError, ok := err.(adal.TokenRefreshError); ok {
|
||||
resp = tokError.Response()
|
||||
}
|
||||
return r, NewErrorWithError(err, "azure.BearerAuthorizer", "WithAuthorization", resp,
|
||||
"Failed to refresh the Token for request to %s", r.URL)
|
||||
// the ordering is important here, prefer RefresherWithContext if available
|
||||
if refresher, ok := ba.tokenProvider.(adal.RefresherWithContext); ok {
|
||||
err = refresher.EnsureFreshWithContext(r.Context())
|
||||
} else if refresher, ok := ba.tokenProvider.(adal.Refresher); ok {
|
||||
err = refresher.EnsureFresh()
|
||||
}
|
||||
if err != nil {
|
||||
var resp *http.Response
|
||||
if tokError, ok := err.(adal.TokenRefreshError); ok {
|
||||
resp = tokError.Response()
|
||||
}
|
||||
return r, NewErrorWithError(err, "azure.BearerAuthorizer", "WithAuthorization", resp,
|
||||
"Failed to refresh the Token for request to %s", r.URL)
|
||||
}
|
||||
return Prepare(r, WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", ba.tokenProvider.OAuthToken())))
|
||||
}
|
||||
@@ -145,7 +151,7 @@ type BearerAuthorizerCallback struct {
|
||||
// is invoked when the HTTP request is submitted.
|
||||
func NewBearerAuthorizerCallback(sender Sender, callback BearerAuthorizerCallbackFunc) *BearerAuthorizerCallback {
|
||||
if sender == nil {
|
||||
sender = &http.Client{}
|
||||
sender = &http.Client{Transport: tracing.Transport}
|
||||
}
|
||||
return &BearerAuthorizerCallback{sender: sender, callback: callback}
|
||||
}
|
||||
@@ -255,3 +261,76 @@ func (egta EventGridKeyAuthorizer) WithAuthorization() PrepareDecorator {
|
||||
}
|
||||
return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization()
|
||||
}
|
||||
|
||||
// BasicAuthorizer implements basic HTTP authorization by adding the Authorization HTTP header
|
||||
// with the value "Basic <TOKEN>" where <TOKEN> is a base64-encoded username:password tuple.
|
||||
type BasicAuthorizer struct {
|
||||
userName string
|
||||
password string
|
||||
}
|
||||
|
||||
// NewBasicAuthorizer creates a new BasicAuthorizer with the specified username and password.
|
||||
func NewBasicAuthorizer(userName, password string) *BasicAuthorizer {
|
||||
return &BasicAuthorizer{
|
||||
userName: userName,
|
||||
password: password,
|
||||
}
|
||||
}
|
||||
|
||||
// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
|
||||
// value is "Basic " followed by the base64-encoded username:password tuple.
|
||||
func (ba *BasicAuthorizer) WithAuthorization() PrepareDecorator {
|
||||
headers := make(map[string]interface{})
|
||||
headers[authorization] = basic + " " + base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", ba.userName, ba.password)))
|
||||
|
||||
return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization()
|
||||
}
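Side note (editorial, not part of the vendored diff): the header produced above is the standard HTTP Basic scheme. For example, for userName "user" and password "pass":

	// Sketch only: shows the literal header value BasicAuthorizer would add.
	value := "Basic " + base64.StdEncoding.EncodeToString([]byte("user:pass"))
	// value == "Basic dXNlcjpwYXNz", sent as the Authorization header.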
|
||||
|
||||
// MultiTenantServicePrincipalTokenAuthorizer provides authentication across tenants.
|
||||
type MultiTenantServicePrincipalTokenAuthorizer interface {
|
||||
WithAuthorization() PrepareDecorator
|
||||
}
|
||||
|
||||
// NewMultiTenantServicePrincipalTokenAuthorizer creates a BearerAuthorizer using the given token provider
|
||||
func NewMultiTenantServicePrincipalTokenAuthorizer(tp adal.MultitenantOAuthTokenProvider) MultiTenantServicePrincipalTokenAuthorizer {
|
||||
return &multiTenantSPTAuthorizer{tp: tp}
|
||||
}
|
||||
|
||||
type multiTenantSPTAuthorizer struct {
|
||||
tp adal.MultitenantOAuthTokenProvider
|
||||
}
|
||||
|
||||
// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header using the
|
||||
// primary token along with the auxiliary authorization header using the auxiliary tokens.
|
||||
//
|
||||
// By default, the token will be automatically refreshed through the Refresher interface.
|
||||
func (mt multiTenantSPTAuthorizer) WithAuthorization() PrepareDecorator {
|
||||
return func(p Preparer) Preparer {
|
||||
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
|
||||
r, err := p.Prepare(r)
|
||||
if err != nil {
|
||||
return r, err
|
||||
}
|
||||
if refresher, ok := mt.tp.(adal.RefresherWithContext); ok {
|
||||
err = refresher.EnsureFreshWithContext(r.Context())
|
||||
if err != nil {
|
||||
var resp *http.Response
|
||||
if tokError, ok := err.(adal.TokenRefreshError); ok {
|
||||
resp = tokError.Response()
|
||||
}
|
||||
return r, NewErrorWithError(err, "azure.multiTenantSPTAuthorizer", "WithAuthorization", resp,
|
||||
"Failed to refresh one or more Tokens for request to %s", r.URL)
|
||||
}
|
||||
}
|
||||
r, err = Prepare(r, WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", mt.tp.PrimaryOAuthToken())))
|
||||
if err != nil {
|
||||
return r, err
|
||||
}
|
||||
auxTokens := mt.tp.AuxiliaryOAuthTokens()
|
||||
for i := range auxTokens {
|
||||
auxTokens[i] = fmt.Sprintf("Bearer %s", auxTokens[i])
|
||||
}
|
||||
return Prepare(r, WithHeader(headerAuxAuthorization, strings.Join(auxTokens, "; ")))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
1043 vendor/github.com/Azure/go-autorest/autorest/azure/async.go generated vendored
File diff suppressed because it is too large
523 vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go generated vendored
@@ -31,126 +31,416 @@ import (
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
"github.com/Azure/go-autorest/autorest/adal"
|
||||
"github.com/Azure/go-autorest/autorest/azure"
|
||||
"github.com/Azure/go-autorest/autorest/azure/cli"
|
||||
"github.com/dimchansky/utfbom"
|
||||
"golang.org/x/crypto/pkcs12"
|
||||
)
|
||||
|
||||
// The possible keys in the Values map.
|
||||
const (
|
||||
SubscriptionID = "AZURE_SUBSCRIPTION_ID"
|
||||
TenantID = "AZURE_TENANT_ID"
|
||||
AuxiliaryTenantIDs = "AZURE_AUXILIARY_TENANT_IDS"
|
||||
ClientID = "AZURE_CLIENT_ID"
|
||||
ClientSecret = "AZURE_CLIENT_SECRET"
|
||||
CertificatePath = "AZURE_CERTIFICATE_PATH"
|
||||
CertificatePassword = "AZURE_CERTIFICATE_PASSWORD"
|
||||
Username = "AZURE_USERNAME"
|
||||
Password = "AZURE_PASSWORD"
|
||||
EnvironmentName = "AZURE_ENVIRONMENT"
|
||||
Resource = "AZURE_AD_RESOURCE"
|
||||
ActiveDirectoryEndpoint = "ActiveDirectoryEndpoint"
|
||||
ResourceManagerEndpoint = "ResourceManagerEndpoint"
|
||||
GraphResourceID = "GraphResourceID"
|
||||
SQLManagementEndpoint = "SQLManagementEndpoint"
|
||||
GalleryEndpoint = "GalleryEndpoint"
|
||||
ManagementEndpoint = "ManagementEndpoint"
|
||||
)
|
||||
|
||||
// NewAuthorizerFromEnvironment creates an Authorizer configured from environment variables in the order:
|
||||
// 1. Client credentials
|
||||
// 2. Client certificate
|
||||
// 3. Username password
|
||||
// 4. MSI
|
||||
func NewAuthorizerFromEnvironment() (autorest.Authorizer, error) {
|
||||
tenantID := os.Getenv("AZURE_TENANT_ID")
|
||||
clientID := os.Getenv("AZURE_CLIENT_ID")
|
||||
clientSecret := os.Getenv("AZURE_CLIENT_SECRET")
|
||||
certificatePath := os.Getenv("AZURE_CERTIFICATE_PATH")
|
||||
certificatePassword := os.Getenv("AZURE_CERTIFICATE_PASSWORD")
|
||||
username := os.Getenv("AZURE_USERNAME")
|
||||
password := os.Getenv("AZURE_PASSWORD")
|
||||
envName := os.Getenv("AZURE_ENVIRONMENT")
|
||||
resource := os.Getenv("AZURE_AD_RESOURCE")
|
||||
settings, err := GetSettingsFromEnvironment()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return settings.GetAuthorizer()
|
||||
}
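Side note (editorial, not part of the vendored diff): a hedged sketch of the typical call site. With AZURE_TENANT_ID, AZURE_CLIENT_ID and AZURE_CLIENT_SECRET set this resolves to client credentials; with only certificate or username/password variables it falls through to those, and with no credential variables it ends at MSI. The client wiring below is illustrative.

	// Sketch only: error handling and client wiring are illustrative.
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}
	client := autorest.Client{}
	client.Authorizer = authorizer // every prepared request now gets an Authorization header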
|
||||
|
||||
var env azure.Environment
|
||||
if envName == "" {
|
||||
env = azure.PublicCloud
|
||||
// NewAuthorizerFromEnvironmentWithResource creates an Authorizer configured from environment variables in the order:
|
||||
// 1. Client credentials
|
||||
// 2. Client certificate
|
||||
// 3. Username password
|
||||
// 4. MSI
|
||||
func NewAuthorizerFromEnvironmentWithResource(resource string) (autorest.Authorizer, error) {
|
||||
settings, err := GetSettingsFromEnvironment()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
settings.Values[Resource] = resource
|
||||
return settings.GetAuthorizer()
|
||||
}
|
||||
|
||||
// EnvironmentSettings contains the available authentication settings.
|
||||
type EnvironmentSettings struct {
|
||||
Values map[string]string
|
||||
Environment azure.Environment
|
||||
}
|
||||
|
||||
// GetSettingsFromEnvironment returns the available authentication settings from the environment.
|
||||
func GetSettingsFromEnvironment() (s EnvironmentSettings, err error) {
|
||||
s = EnvironmentSettings{
|
||||
Values: map[string]string{},
|
||||
}
|
||||
s.setValue(SubscriptionID)
|
||||
s.setValue(TenantID)
|
||||
s.setValue(AuxiliaryTenantIDs)
|
||||
s.setValue(ClientID)
|
||||
s.setValue(ClientSecret)
|
||||
s.setValue(CertificatePath)
|
||||
s.setValue(CertificatePassword)
|
||||
s.setValue(Username)
|
||||
s.setValue(Password)
|
||||
s.setValue(EnvironmentName)
|
||||
s.setValue(Resource)
|
||||
if v := s.Values[EnvironmentName]; v == "" {
|
||||
s.Environment = azure.PublicCloud
|
||||
} else {
|
||||
var err error
|
||||
env, err = azure.EnvironmentFromName(envName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
s.Environment, err = azure.EnvironmentFromName(v)
|
||||
}
|
||||
if s.Values[Resource] == "" {
|
||||
s.Values[Resource] = s.Environment.ResourceManagerEndpoint
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetSubscriptionID returns the available subscription ID or an empty string.
|
||||
func (settings EnvironmentSettings) GetSubscriptionID() string {
|
||||
return settings.Values[SubscriptionID]
|
||||
}
|
||||
|
||||
// adds the specified environment variable value to the Values map if it exists
|
||||
func (settings EnvironmentSettings) setValue(key string) {
|
||||
if v := os.Getenv(key); v != "" {
|
||||
settings.Values[key] = v
|
||||
}
|
||||
}
|
||||
|
||||
// helper to return client and tenant IDs
|
||||
func (settings EnvironmentSettings) getClientAndTenant() (string, string) {
|
||||
clientID := settings.Values[ClientID]
|
||||
tenantID := settings.Values[TenantID]
|
||||
return clientID, tenantID
|
||||
}
|
||||
|
||||
// GetClientCredentials creates a config object from the available client credentials.
|
||||
// An error is returned if no client credentials are available.
|
||||
func (settings EnvironmentSettings) GetClientCredentials() (ClientCredentialsConfig, error) {
|
||||
secret := settings.Values[ClientSecret]
|
||||
if secret == "" {
|
||||
return ClientCredentialsConfig{}, errors.New("missing client secret")
|
||||
}
|
||||
clientID, tenantID := settings.getClientAndTenant()
|
||||
config := NewClientCredentialsConfig(clientID, secret, tenantID)
|
||||
config.AADEndpoint = settings.Environment.ActiveDirectoryEndpoint
|
||||
config.Resource = settings.Values[Resource]
|
||||
if auxTenants, ok := settings.Values[AuxiliaryTenantIDs]; ok {
|
||||
config.AuxTenants = strings.Split(auxTenants, ";")
|
||||
for i := range config.AuxTenants {
|
||||
config.AuxTenants[i] = strings.TrimSpace(config.AuxTenants[i])
|
||||
}
|
||||
}
|
||||
return config, nil
|
||||
}
|
||||
|
||||
if resource == "" {
|
||||
resource = env.ResourceManagerEndpoint
|
||||
// GetClientCertificate creates a config object from the available certificate credentials.
|
||||
// An error is returned if no certificate credentials are available.
|
||||
func (settings EnvironmentSettings) GetClientCertificate() (ClientCertificateConfig, error) {
|
||||
certPath := settings.Values[CertificatePath]
|
||||
if certPath == "" {
|
||||
return ClientCertificateConfig{}, errors.New("missing certificate path")
|
||||
}
|
||||
certPwd := settings.Values[CertificatePassword]
|
||||
clientID, tenantID := settings.getClientAndTenant()
|
||||
config := NewClientCertificateConfig(certPath, certPwd, clientID, tenantID)
|
||||
config.AADEndpoint = settings.Environment.ActiveDirectoryEndpoint
|
||||
config.Resource = settings.Values[Resource]
|
||||
return config, nil
|
||||
}
|
||||
|
||||
// GetUsernamePassword creates a config object from the available username/password credentials.
|
||||
// An error is returned if no username/password credentials are available.
|
||||
func (settings EnvironmentSettings) GetUsernamePassword() (UsernamePasswordConfig, error) {
|
||||
username := settings.Values[Username]
|
||||
password := settings.Values[Password]
|
||||
if username == "" || password == "" {
|
||||
return UsernamePasswordConfig{}, errors.New("missing username/password")
|
||||
}
|
||||
clientID, tenantID := settings.getClientAndTenant()
|
||||
config := NewUsernamePasswordConfig(username, password, clientID, tenantID)
|
||||
config.AADEndpoint = settings.Environment.ActiveDirectoryEndpoint
|
||||
config.Resource = settings.Values[Resource]
|
||||
return config, nil
|
||||
}
|
||||
|
||||
// GetMSI creates a MSI config object from the available client ID.
|
||||
func (settings EnvironmentSettings) GetMSI() MSIConfig {
|
||||
config := NewMSIConfig()
|
||||
config.Resource = settings.Values[Resource]
|
||||
config.ClientID = settings.Values[ClientID]
|
||||
return config
|
||||
}
|
||||
|
||||
// GetDeviceFlow creates a device-flow config object from the available client and tenant IDs.
|
||||
func (settings EnvironmentSettings) GetDeviceFlow() DeviceFlowConfig {
|
||||
clientID, tenantID := settings.getClientAndTenant()
|
||||
config := NewDeviceFlowConfig(clientID, tenantID)
|
||||
config.AADEndpoint = settings.Environment.ActiveDirectoryEndpoint
|
||||
config.Resource = settings.Values[Resource]
|
||||
return config
|
||||
}
|
||||
|
||||
// GetAuthorizer creates an Authorizer configured from environment variables in the order:
|
||||
// 1. Client credentials
|
||||
// 2. Client certificate
|
||||
// 3. Username password
|
||||
// 4. MSI
|
||||
func (settings EnvironmentSettings) GetAuthorizer() (autorest.Authorizer, error) {
|
||||
//1.Client Credentials
|
||||
if clientSecret != "" {
|
||||
config := NewClientCredentialsConfig(clientID, clientSecret, tenantID)
|
||||
config.AADEndpoint = env.ActiveDirectoryEndpoint
|
||||
config.Resource = resource
|
||||
return config.Authorizer()
|
||||
if c, e := settings.GetClientCredentials(); e == nil {
|
||||
return c.Authorizer()
|
||||
}
|
||||
|
||||
//2. Client Certificate
|
||||
if certificatePath != "" {
|
||||
config := NewClientCertificateConfig(certificatePath, certificatePassword, clientID, tenantID)
|
||||
config.AADEndpoint = env.ActiveDirectoryEndpoint
|
||||
config.Resource = resource
|
||||
return config.Authorizer()
|
||||
if c, e := settings.GetClientCertificate(); e == nil {
|
||||
return c.Authorizer()
|
||||
}
|
||||
|
||||
//3. Username Password
|
||||
if username != "" && password != "" {
|
||||
config := NewUsernamePasswordConfig(username, password, clientID, tenantID)
|
||||
config.AADEndpoint = env.ActiveDirectoryEndpoint
|
||||
config.Resource = resource
|
||||
return config.Authorizer()
|
||||
if c, e := settings.GetUsernamePassword(); e == nil {
|
||||
return c.Authorizer()
|
||||
}
|
||||
|
||||
// 4. MSI
|
||||
config := NewMSIConfig()
|
||||
config.Resource = resource
|
||||
config.ClientID = clientID
|
||||
return config.Authorizer()
|
||||
return settings.GetMSI().Authorizer()
|
||||
}
|
||||
|
||||
// NewAuthorizerFromFile creates an Authorizer configured from a configuration file.
|
||||
// NewAuthorizerFromFile creates an Authorizer configured from a configuration file in the following order.
|
||||
// 1. Client credentials
|
||||
// 2. Client certificate
|
||||
func NewAuthorizerFromFile(baseURI string) (autorest.Authorizer, error) {
|
||||
settings, err := GetSettingsFromFile()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if a, err := settings.ClientCredentialsAuthorizer(baseURI); err == nil {
|
||||
return a, err
|
||||
}
|
||||
if a, err := settings.ClientCertificateAuthorizer(baseURI); err == nil {
|
||||
return a, err
|
||||
}
|
||||
return nil, errors.New("auth file missing client and certificate credentials")
|
||||
}
|
||||
|
||||
// NewAuthorizerFromFileWithResource creates an Authorizer configured from a configuration file in the following order.
|
||||
// 1. Client credentials
|
||||
// 2. Client certificate
|
||||
func NewAuthorizerFromFileWithResource(resource string) (autorest.Authorizer, error) {
|
||||
s, err := GetSettingsFromFile()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if a, err := s.ClientCredentialsAuthorizerWithResource(resource); err == nil {
|
||||
return a, err
|
||||
}
|
||||
if a, err := s.ClientCertificateAuthorizerWithResource(resource); err == nil {
|
||||
return a, err
|
||||
}
|
||||
return nil, errors.New("auth file missing client and certificate credentials")
|
||||
}
|
||||
|
||||
// NewAuthorizerFromCLI creates an Authorizer configured from Azure CLI 2.0 for local development scenarios.
|
||||
func NewAuthorizerFromCLI() (autorest.Authorizer, error) {
|
||||
settings, err := GetSettingsFromEnvironment()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if settings.Values[Resource] == "" {
|
||||
settings.Values[Resource] = settings.Environment.ResourceManagerEndpoint
|
||||
}
|
||||
|
||||
return NewAuthorizerFromCLIWithResource(settings.Values[Resource])
|
||||
}
|
||||
|
||||
// NewAuthorizerFromCLIWithResource creates an Authorizer configured from Azure CLI 2.0 for local development scenarios.
|
||||
func NewAuthorizerFromCLIWithResource(resource string) (autorest.Authorizer, error) {
|
||||
token, err := cli.GetTokenFromCLI(resource)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
adalToken, err := token.ToADALToken()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return autorest.NewBearerAuthorizer(&adalToken), nil
|
||||
}
|
||||
|
||||
// GetSettingsFromFile returns the available authentication settings from an Azure CLI authentication file.
|
||||
func GetSettingsFromFile() (FileSettings, error) {
|
||||
s := FileSettings{}
|
||||
fileLocation := os.Getenv("AZURE_AUTH_LOCATION")
|
||||
if fileLocation == "" {
|
||||
return nil, errors.New("auth file not found. Environment variable AZURE_AUTH_LOCATION is not set")
|
||||
return s, errors.New("environment variable AZURE_AUTH_LOCATION is not set")
|
||||
}
|
||||
|
||||
contents, err := ioutil.ReadFile(fileLocation)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return s, err
|
||||
}
|
||||
|
||||
// Auth file might be encoded
|
||||
decoded, err := decode(contents)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return s, err
|
||||
}
|
||||
|
||||
file := file{}
|
||||
err = json.Unmarshal(decoded, &file)
|
||||
authFile := map[string]interface{}{}
|
||||
err = json.Unmarshal(decoded, &authFile)
|
||||
if err != nil {
|
||||
return s, err
|
||||
}
|
||||
|
||||
s.Values = map[string]string{}
|
||||
s.setKeyValue(ClientID, authFile["clientId"])
|
||||
s.setKeyValue(ClientSecret, authFile["clientSecret"])
|
||||
s.setKeyValue(CertificatePath, authFile["clientCertificate"])
|
||||
s.setKeyValue(CertificatePassword, authFile["clientCertificatePassword"])
|
||||
s.setKeyValue(SubscriptionID, authFile["subscriptionId"])
|
||||
s.setKeyValue(TenantID, authFile["tenantId"])
|
||||
s.setKeyValue(ActiveDirectoryEndpoint, authFile["activeDirectoryEndpointUrl"])
|
||||
s.setKeyValue(ResourceManagerEndpoint, authFile["resourceManagerEndpointUrl"])
|
||||
s.setKeyValue(GraphResourceID, authFile["activeDirectoryGraphResourceId"])
|
||||
s.setKeyValue(SQLManagementEndpoint, authFile["sqlManagementEndpointUrl"])
|
||||
s.setKeyValue(GalleryEndpoint, authFile["galleryEndpointUrl"])
|
||||
s.setKeyValue(ManagementEndpoint, authFile["managementEndpointUrl"])
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// FileSettings contains the available authentication settings.
|
||||
type FileSettings struct {
|
||||
Values map[string]string
|
||||
}
|
||||
|
||||
// GetSubscriptionID returns the available subscription ID or an empty string.
|
||||
func (settings FileSettings) GetSubscriptionID() string {
|
||||
return settings.Values[SubscriptionID]
|
||||
}
|
||||
|
||||
// adds the specified value to the Values map if it isn't nil
|
||||
func (settings FileSettings) setKeyValue(key string, val interface{}) {
|
||||
if val != nil {
|
||||
settings.Values[key] = val.(string)
|
||||
}
|
||||
}
|
||||
|
||||
// returns the specified AAD endpoint or the public cloud endpoint if unspecified
|
||||
func (settings FileSettings) getAADEndpoint() string {
|
||||
if v, ok := settings.Values[ActiveDirectoryEndpoint]; ok {
|
||||
return v
|
||||
}
|
||||
return azure.PublicCloud.ActiveDirectoryEndpoint
|
||||
}
|
||||
|
||||
// ServicePrincipalTokenFromClientCredentials creates a ServicePrincipalToken from the available client credentials.
|
||||
func (settings FileSettings) ServicePrincipalTokenFromClientCredentials(baseURI string) (*adal.ServicePrincipalToken, error) {
|
||||
resource, err := settings.getResourceForToken(baseURI)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return settings.ServicePrincipalTokenFromClientCredentialsWithResource(resource)
|
||||
}
|
||||
|
||||
resource, err := getResourceForToken(file, baseURI)
|
||||
// ClientCredentialsAuthorizer creates an authorizer from the available client credentials.
|
||||
func (settings FileSettings) ClientCredentialsAuthorizer(baseURI string) (autorest.Authorizer, error) {
|
||||
resource, err := settings.getResourceForToken(baseURI)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return settings.ClientCredentialsAuthorizerWithResource(resource)
|
||||
}
|
||||
|
||||
config, err := adal.NewOAuthConfig(file.ActiveDirectoryEndpoint, file.TenantID)
|
||||
// ServicePrincipalTokenFromClientCredentialsWithResource creates a ServicePrincipalToken
|
||||
// from the available client credentials and the specified resource.
|
||||
func (settings FileSettings) ServicePrincipalTokenFromClientCredentialsWithResource(resource string) (*adal.ServicePrincipalToken, error) {
|
||||
if _, ok := settings.Values[ClientSecret]; !ok {
|
||||
return nil, errors.New("missing client secret")
|
||||
}
|
||||
config, err := adal.NewOAuthConfig(settings.getAADEndpoint(), settings.Values[TenantID])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return adal.NewServicePrincipalToken(*config, settings.Values[ClientID], settings.Values[ClientSecret], resource)
|
||||
}
|
||||
|
||||
spToken, err := adal.NewServicePrincipalToken(*config, file.ClientID, file.ClientSecret, resource)
|
||||
func (settings FileSettings) clientCertificateConfigWithResource(resource string) (ClientCertificateConfig, error) {
|
||||
if _, ok := settings.Values[CertificatePath]; !ok {
|
||||
return ClientCertificateConfig{}, errors.New("missing certificate path")
|
||||
}
|
||||
cfg := NewClientCertificateConfig(settings.Values[CertificatePath], settings.Values[CertificatePassword], settings.Values[ClientID], settings.Values[TenantID])
|
||||
cfg.AADEndpoint = settings.getAADEndpoint()
|
||||
cfg.Resource = resource
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
// ClientCredentialsAuthorizerWithResource creates an authorizer from the available client credentials and the specified resource.
|
||||
func (settings FileSettings) ClientCredentialsAuthorizerWithResource(resource string) (autorest.Authorizer, error) {
|
||||
spToken, err := settings.ServicePrincipalTokenFromClientCredentialsWithResource(resource)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return autorest.NewBearerAuthorizer(spToken), nil
|
||||
}
|
||||
|
||||
// File represents the authentication file
|
||||
type file struct {
|
||||
ClientID string `json:"clientId,omitempty"`
|
||||
ClientSecret string `json:"clientSecret,omitempty"`
|
||||
SubscriptionID string `json:"subscriptionId,omitempty"`
|
||||
TenantID string `json:"tenantId,omitempty"`
|
||||
ActiveDirectoryEndpoint string `json:"activeDirectoryEndpointUrl,omitempty"`
|
||||
ResourceManagerEndpoint string `json:"resourceManagerEndpointUrl,omitempty"`
|
||||
GraphResourceID string `json:"activeDirectoryGraphResourceId,omitempty"`
|
||||
SQLManagementEndpoint string `json:"sqlManagementEndpointUrl,omitempty"`
|
||||
GalleryEndpoint string `json:"galleryEndpointUrl,omitempty"`
|
||||
ManagementEndpoint string `json:"managementEndpointUrl,omitempty"`
|
||||
// ServicePrincipalTokenFromClientCertificate creates a ServicePrincipalToken from the available certificate credentials.
|
||||
func (settings FileSettings) ServicePrincipalTokenFromClientCertificate(baseURI string) (*adal.ServicePrincipalToken, error) {
|
||||
resource, err := settings.getResourceForToken(baseURI)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return settings.ServicePrincipalTokenFromClientCertificateWithResource(resource)
|
||||
}
|
||||
|
||||
// ClientCertificateAuthorizer creates an authorizer from the available certificate credentials.
|
||||
func (settings FileSettings) ClientCertificateAuthorizer(baseURI string) (autorest.Authorizer, error) {
|
||||
resource, err := settings.getResourceForToken(baseURI)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return settings.ClientCertificateAuthorizerWithResource(resource)
|
||||
}
|
||||
|
||||
// ServicePrincipalTokenFromClientCertificateWithResource creates a ServicePrincipalToken from the available certificate credentials.
|
||||
func (settings FileSettings) ServicePrincipalTokenFromClientCertificateWithResource(resource string) (*adal.ServicePrincipalToken, error) {
|
||||
cfg, err := settings.clientCertificateConfigWithResource(resource)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return cfg.ServicePrincipalToken()
|
||||
}
|
||||
|
||||
// ClientCertificateAuthorizerWithResource creates an authorizer from the available certificate credentials and the specified resource.
|
||||
func (settings FileSettings) ClientCertificateAuthorizerWithResource(resource string) (autorest.Authorizer, error) {
|
||||
cfg, err := settings.clientCertificateConfigWithResource(resource)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return cfg.Authorizer()
|
||||
}
|
||||
|
||||
func decode(b []byte) ([]byte, error) {
|
||||
@@ -175,7 +465,7 @@ func decode(b []byte) ([]byte, error) {
|
||||
return ioutil.ReadAll(reader)
|
||||
}
|
||||
|
||||
func getResourceForToken(f file, baseURI string) (string, error) {
|
||||
func (settings FileSettings) getResourceForToken(baseURI string) (string, error) {
|
||||
// Compare default base URI from the SDK to the endpoints from the public cloud
|
||||
// Base URI and token resource are the same string. This func finds the authentication
|
||||
// file field that matches the SDK base URI. The SDK defines the public cloud
|
||||
@@ -185,15 +475,15 @@ func getResourceForToken(f file, baseURI string) (string, error) {
|
||||
}
|
||||
switch baseURI {
|
||||
case azure.PublicCloud.ServiceManagementEndpoint:
|
||||
return f.ManagementEndpoint, nil
|
||||
return settings.Values[ManagementEndpoint], nil
|
||||
case azure.PublicCloud.ResourceManagerEndpoint:
|
||||
return f.ResourceManagerEndpoint, nil
|
||||
return settings.Values[ResourceManagerEndpoint], nil
|
||||
case azure.PublicCloud.ActiveDirectoryEndpoint:
|
||||
return f.ActiveDirectoryEndpoint, nil
|
||||
return settings.Values[ActiveDirectoryEndpoint], nil
|
||||
case azure.PublicCloud.GalleryEndpoint:
|
||||
return f.GalleryEndpoint, nil
|
||||
return settings.Values[GalleryEndpoint], nil
|
||||
case azure.PublicCloud.GraphEndpoint:
|
||||
return f.GraphResourceID, nil
|
||||
return settings.Values[GraphResourceID], nil
|
||||
}
|
||||
return "", fmt.Errorf("auth: base URI not found in endpoints")
|
||||
}
|
||||
@@ -264,23 +554,43 @@ type ClientCredentialsConfig struct {
|
||||
ClientID string
|
||||
ClientSecret string
|
||||
TenantID string
|
||||
AuxTenants []string
|
||||
AADEndpoint string
|
||||
Resource string
|
||||
}
|
||||
|
||||
// Authorizer gets the authorizer from client credentials.
|
||||
func (ccc ClientCredentialsConfig) Authorizer() (autorest.Authorizer, error) {
|
||||
// ServicePrincipalToken creates a ServicePrincipalToken from client credentials.
|
||||
func (ccc ClientCredentialsConfig) ServicePrincipalToken() (*adal.ServicePrincipalToken, error) {
|
||||
oauthConfig, err := adal.NewOAuthConfig(ccc.AADEndpoint, ccc.TenantID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return adal.NewServicePrincipalToken(*oauthConfig, ccc.ClientID, ccc.ClientSecret, ccc.Resource)
|
||||
}
|
||||
|
||||
spToken, err := adal.NewServicePrincipalToken(*oauthConfig, ccc.ClientID, ccc.ClientSecret, ccc.Resource)
|
||||
// MultiTenantServicePrincipalToken creates a MultiTenantServicePrincipalToken from client credentials.
|
||||
func (ccc ClientCredentialsConfig) MultiTenantServicePrincipalToken() (*adal.MultiTenantServicePrincipalToken, error) {
|
||||
oauthConfig, err := adal.NewMultiTenantOAuthConfig(ccc.AADEndpoint, ccc.TenantID, ccc.AuxTenants, adal.OAuthOptions{})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get oauth token from client credentials: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
return adal.NewMultiTenantServicePrincipalToken(oauthConfig, ccc.ClientID, ccc.ClientSecret, ccc.Resource)
|
||||
}
|
||||
|
||||
return autorest.NewBearerAuthorizer(spToken), nil
|
||||
// Authorizer gets the authorizer from client credentials.
|
||||
func (ccc ClientCredentialsConfig) Authorizer() (autorest.Authorizer, error) {
|
||||
if len(ccc.AuxTenants) == 0 {
|
||||
spToken, err := ccc.ServicePrincipalToken()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get SPT from client credentials: %v", err)
|
||||
}
|
||||
return autorest.NewBearerAuthorizer(spToken), nil
|
||||
}
|
||||
mtSPT, err := ccc.MultiTenantServicePrincipalToken()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get multitenant SPT from client credentials: %v", err)
|
||||
}
|
||||
return autorest.NewMultiTenantServicePrincipalTokenAuthorizer(mtSPT), nil
|
||||
}
|
||||
|
||||
// ClientCertificateConfig provides the options to get a bearer authorizer from a client certificate.
|
||||
@@ -293,26 +603,29 @@ type ClientCertificateConfig struct {
|
||||
Resource string
|
||||
}
|
||||
|
||||
// Authorizer gets an authorizer object from client certificate.
|
||||
func (ccc ClientCertificateConfig) Authorizer() (autorest.Authorizer, error) {
|
||||
// ServicePrincipalToken creates a ServicePrincipalToken from client certificate.
|
||||
func (ccc ClientCertificateConfig) ServicePrincipalToken() (*adal.ServicePrincipalToken, error) {
|
||||
oauthConfig, err := adal.NewOAuthConfig(ccc.AADEndpoint, ccc.TenantID)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
certData, err := ioutil.ReadFile(ccc.CertificatePath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read the certificate file (%s): %v", ccc.CertificatePath, err)
|
||||
}
|
||||
|
||||
certificate, rsaPrivateKey, err := decodePkcs12(certData, ccc.CertificatePassword)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err)
|
||||
}
|
||||
return adal.NewServicePrincipalTokenFromCertificate(*oauthConfig, ccc.ClientID, certificate, rsaPrivateKey, ccc.Resource)
|
||||
}
|
||||
|
||||
spToken, err := adal.NewServicePrincipalTokenFromCertificate(*oauthConfig, ccc.ClientID, certificate, rsaPrivateKey, ccc.Resource)
|
||||
|
||||
// Authorizer gets an authorizer object from client certificate.
|
||||
func (ccc ClientCertificateConfig) Authorizer() (autorest.Authorizer, error) {
|
||||
spToken, err := ccc.ServicePrincipalToken()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get oauth token from certificate auth: %v", err)
|
||||
}
|
||||
|
||||
return autorest.NewBearerAuthorizer(spToken), nil
|
||||
}
|
||||
|
||||
@@ -326,26 +639,30 @@ type DeviceFlowConfig struct {
|
||||
|
||||
// Authorizer gets the authorizer from device flow.
|
||||
func (dfc DeviceFlowConfig) Authorizer() (autorest.Authorizer, error) {
|
||||
oauthClient := &autorest.Client{}
|
||||
spToken, err := dfc.ServicePrincipalToken()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get oauth token from device flow: %v", err)
|
||||
}
|
||||
return autorest.NewBearerAuthorizer(spToken), nil
|
||||
}
|
||||
|
||||
// ServicePrincipalToken gets the service principal token from device flow.
|
||||
func (dfc DeviceFlowConfig) ServicePrincipalToken() (*adal.ServicePrincipalToken, error) {
|
||||
oauthConfig, err := adal.NewOAuthConfig(dfc.AADEndpoint, dfc.TenantID)
|
||||
deviceCode, err := adal.InitiateDeviceAuth(oauthClient, *oauthConfig, dfc.ClientID, dfc.AADEndpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
oauthClient := &autorest.Client{}
|
||||
deviceCode, err := adal.InitiateDeviceAuth(oauthClient, *oauthConfig, dfc.ClientID, dfc.Resource)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to start device auth flow: %s", err)
|
||||
}
|
||||
|
||||
log.Println(*deviceCode.Message)
|
||||
|
||||
token, err := adal.WaitForUserCompletion(oauthClient, deviceCode)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to finish device auth flow: %s", err)
|
||||
}
|
||||
|
||||
spToken, err := adal.NewServicePrincipalTokenFromManualToken(*oauthConfig, dfc.ClientID, dfc.Resource, *token)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get oauth token from device flow: %v", err)
|
||||
}
|
||||
|
||||
return autorest.NewBearerAuthorizer(spToken), nil
|
||||
return adal.NewServicePrincipalTokenFromManualToken(*oauthConfig, dfc.ClientID, dfc.Resource, *token)
|
||||
}
|
||||
|
||||
func decodePkcs12(pkcs []byte, password string) (*x509.Certificate, *rsa.PrivateKey, error) {
|
||||
@@ -372,17 +689,21 @@ type UsernamePasswordConfig struct {
|
||||
Resource string
|
||||
}
|
||||
|
||||
// ServicePrincipalToken creates a ServicePrincipalToken from username and password.
|
||||
func (ups UsernamePasswordConfig) ServicePrincipalToken() (*adal.ServicePrincipalToken, error) {
|
||||
oauthConfig, err := adal.NewOAuthConfig(ups.AADEndpoint, ups.TenantID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return adal.NewServicePrincipalTokenFromUsernamePassword(*oauthConfig, ups.ClientID, ups.Username, ups.Password, ups.Resource)
|
||||
}
|
||||
|
||||
// Authorizer gets the authorizer from a username and a password.
|
||||
func (ups UsernamePasswordConfig) Authorizer() (autorest.Authorizer, error) {
|
||||
|
||||
oauthConfig, err := adal.NewOAuthConfig(ups.AADEndpoint, ups.TenantID)
|
||||
|
||||
spToken, err := adal.NewServicePrincipalTokenFromUsernamePassword(*oauthConfig, ups.ClientID, ups.Username, ups.Password, ups.Resource)
|
||||
|
||||
spToken, err := ups.ServicePrincipalToken()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get oauth token from username and password auth: %v", err)
|
||||
}
|
||||
|
||||
return autorest.NewBearerAuthorizer(spToken), nil
|
||||
}
|
||||
|
||||
@@ -399,9 +720,17 @@ func (mc MSIConfig) Authorizer() (autorest.Authorizer, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
spToken, err := adal.NewServicePrincipalTokenFromMSI(msiEndpoint, mc.Resource)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get oauth token from MSI: %v", err)
|
||||
var spToken *adal.ServicePrincipalToken
|
||||
if mc.ClientID == "" {
|
||||
spToken, err = adal.NewServicePrincipalTokenFromMSI(msiEndpoint, mc.Resource)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get oauth token from MSI: %v", err)
|
||||
}
|
||||
} else {
|
||||
spToken, err = adal.NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint, mc.Resource, mc.ClientID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get oauth token from MSI for user assigned identity: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
return autorest.NewBearerAuthorizer(spToken), nil
|
||||
|
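The hunk above teaches MSIConfig.Authorizer to pick a user-assigned identity when ClientID is set. A minimal usage sketch, assuming the type lives in the auth package (github.com/Azure/go-autorest/autorest/azure/auth) and using a placeholder client ID:

package main

import (
	"log"

	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	mc := auth.MSIConfig{
		Resource: "https://management.azure.com/",
		// Setting ClientID selects a user-assigned identity, the case added above;
		// the GUID here is only a placeholder.
		ClientID: "00000000-0000-0000-0000-000000000000",
	}
	authorizer, err := mc.Authorizer()
	if err != nil {
		log.Fatalf("failed to get MSI authorizer: %v", err)
	}
	_ = authorizer // assign to an SDK client's Authorizer field
}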
75 vendor/github.com/Azure/go-autorest/autorest/azure/azure.go generated vendored
@@ -44,11 +44,12 @@ const (
|
||||
// ServiceError encapsulates the error response from an Azure service.
|
||||
// It adhears to the OData v4 specification for error responses.
|
||||
type ServiceError struct {
|
||||
Code string `json:"code"`
|
||||
Message string `json:"message"`
|
||||
Target *string `json:"target"`
|
||||
Details []map[string]interface{} `json:"details"`
|
||||
InnerError map[string]interface{} `json:"innererror"`
|
||||
Code string `json:"code"`
|
||||
Message string `json:"message"`
|
||||
Target *string `json:"target"`
|
||||
Details []map[string]interface{} `json:"details"`
|
||||
InnerError map[string]interface{} `json:"innererror"`
|
||||
AdditionalInfo []map[string]interface{} `json:"additionalInfo"`
|
||||
}
|
||||
|
||||
func (se ServiceError) Error() string {
|
||||
@@ -74,6 +75,14 @@ func (se ServiceError) Error() string {
|
||||
result += fmt.Sprintf(" InnerError=%v", string(d))
|
||||
}
|
||||
|
||||
if se.AdditionalInfo != nil {
|
||||
d, err := json.Marshal(se.AdditionalInfo)
|
||||
if err != nil {
|
||||
result += fmt.Sprintf(" AdditionalInfo=%v", se.AdditionalInfo)
|
||||
}
|
||||
result += fmt.Sprintf(" AdditionalInfo=%v", string(d))
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
@@ -86,44 +95,47 @@ func (se *ServiceError) UnmarshalJSON(b []byte) error {
|
||||
// http://docs.oasis-open.org/odata/odata-json-format/v4.0/os/odata-json-format-v4.0-os.html#_Toc372793091
|
||||
|
||||
type serviceError1 struct {
|
||||
Code string `json:"code"`
|
||||
Message string `json:"message"`
|
||||
Target *string `json:"target"`
|
||||
Details []map[string]interface{} `json:"details"`
|
||||
InnerError map[string]interface{} `json:"innererror"`
|
||||
Code string `json:"code"`
|
||||
Message string `json:"message"`
|
||||
Target *string `json:"target"`
|
||||
Details []map[string]interface{} `json:"details"`
|
||||
InnerError map[string]interface{} `json:"innererror"`
|
||||
AdditionalInfo []map[string]interface{} `json:"additionalInfo"`
|
||||
}
|
||||
|
||||
type serviceError2 struct {
|
||||
Code string `json:"code"`
|
||||
Message string `json:"message"`
|
||||
Target *string `json:"target"`
|
||||
Details map[string]interface{} `json:"details"`
|
||||
InnerError map[string]interface{} `json:"innererror"`
|
||||
Code string `json:"code"`
|
||||
Message string `json:"message"`
|
||||
Target *string `json:"target"`
|
||||
Details map[string]interface{} `json:"details"`
|
||||
InnerError map[string]interface{} `json:"innererror"`
|
||||
AdditionalInfo []map[string]interface{} `json:"additionalInfo"`
|
||||
}
|
||||
|
||||
se1 := serviceError1{}
|
||||
err := json.Unmarshal(b, &se1)
|
||||
if err == nil {
|
||||
se.populate(se1.Code, se1.Message, se1.Target, se1.Details, se1.InnerError)
|
||||
se.populate(se1.Code, se1.Message, se1.Target, se1.Details, se1.InnerError, se1.AdditionalInfo)
|
||||
return nil
|
||||
}
|
||||
|
||||
se2 := serviceError2{}
|
||||
err = json.Unmarshal(b, &se2)
|
||||
if err == nil {
|
||||
se.populate(se2.Code, se2.Message, se2.Target, nil, se2.InnerError)
|
||||
se.populate(se2.Code, se2.Message, se2.Target, nil, se2.InnerError, se2.AdditionalInfo)
|
||||
se.Details = append(se.Details, se2.Details)
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (se *ServiceError) populate(code, message string, target *string, details []map[string]interface{}, inner map[string]interface{}) {
|
||||
func (se *ServiceError) populate(code, message string, target *string, details []map[string]interface{}, inner map[string]interface{}, additional []map[string]interface{}) {
|
||||
se.Code = code
|
||||
se.Message = message
|
||||
se.Target = target
|
||||
se.Details = details
|
||||
se.InnerError = inner
|
||||
se.AdditionalInfo = additional
|
||||
}
|
||||
|
||||
// RequestError describes an error response returned by Azure service.
|
||||
@@ -279,16 +291,29 @@ func WithErrorUnlessStatusCode(codes ...int) autorest.RespondDecorator {
|
||||
resp.Body = ioutil.NopCloser(&b)
|
||||
if decodeErr != nil {
|
||||
return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b.String(), decodeErr)
|
||||
} else if e.ServiceError == nil {
|
||||
}
|
||||
if e.ServiceError == nil {
|
||||
// Check if error is unwrapped ServiceError
|
||||
if err := json.Unmarshal(b.Bytes(), &e.ServiceError); err != nil || e.ServiceError.Message == "" {
|
||||
e.ServiceError = &ServiceError{
|
||||
Code: "Unknown",
|
||||
Message: "Unknown service error",
|
||||
}
|
||||
if err := json.Unmarshal(b.Bytes(), &e.ServiceError); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if e.ServiceError.Message == "" {
|
||||
// if we're here it means the returned error wasn't OData v4 compliant.
|
||||
// try to unmarshal the body as raw JSON in hopes of getting something.
|
||||
rawBody := map[string]interface{}{}
|
||||
if err := json.Unmarshal(b.Bytes(), &rawBody); err != nil {
|
||||
return err
|
||||
}
|
||||
e.ServiceError = &ServiceError{
|
||||
Code: "Unknown",
|
||||
Message: "Unknown service error",
|
||||
}
|
||||
if len(rawBody) > 0 {
|
||||
e.ServiceError.Details = []map[string]interface{}{rawBody}
|
||||
}
|
||||
}
|
||||
e.Response = resp
|
||||
e.RequestID = ExtractRequestID(resp)
|
||||
if e.StatusCode == nil {
|
||||
e.StatusCode = resp.StatusCode
|
||||
|
79 vendor/github.com/Azure/go-autorest/autorest/azure/cli/profile.go generated vendored Normal file
@@ -0,0 +1,79 @@
|
||||
package cli
|
||||
|
||||
// Copyright 2017 Microsoft Corporation
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/dimchansky/utfbom"
|
||||
"github.com/mitchellh/go-homedir"
|
||||
)
|
||||
|
||||
// Profile represents a Profile from the Azure CLI
|
||||
type Profile struct {
|
||||
InstallationID string `json:"installationId"`
|
||||
Subscriptions []Subscription `json:"subscriptions"`
|
||||
}
|
||||
|
||||
// Subscription represents a Subscription from the Azure CLI
|
||||
type Subscription struct {
|
||||
EnvironmentName string `json:"environmentName"`
|
||||
ID string `json:"id"`
|
||||
IsDefault bool `json:"isDefault"`
|
||||
Name string `json:"name"`
|
||||
State string `json:"state"`
|
||||
TenantID string `json:"tenantId"`
|
||||
User *User `json:"user"`
|
||||
}
|
||||
|
||||
// User represents a User from the Azure CLI
|
||||
type User struct {
|
||||
Name string `json:"name"`
|
||||
Type string `json:"type"`
|
||||
}
|
||||
|
||||
const azureProfileJSON = "azureProfile.json"
|
||||
|
||||
// ProfilePath returns the path where the Azure Profile is stored from the Azure CLI
|
||||
func ProfilePath() (string, error) {
|
||||
if cfgDir := os.Getenv("AZURE_CONFIG_DIR"); cfgDir != "" {
|
||||
return filepath.Join(cfgDir, azureProfileJSON), nil
|
||||
}
|
||||
return homedir.Expand("~/.azure/" + azureProfileJSON)
|
||||
}
|
||||
|
||||
// LoadProfile restores a Profile object from a file located at 'path'.
|
||||
func LoadProfile(path string) (result Profile, err error) {
|
||||
var contents []byte
|
||||
contents, err = ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("failed to open file (%s) while loading token: %v", path, err)
|
||||
return
|
||||
}
|
||||
reader := utfbom.SkipOnly(bytes.NewReader(contents))
|
||||
|
||||
dec := json.NewDecoder(reader)
|
||||
if err = dec.Decode(&result); err != nil {
|
||||
err = fmt.Errorf("failed to decode contents of file (%s) into a Profile representation: %v", path, err)
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
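As a rough usage sketch of the new cli package (not part of the diff itself), ProfilePath and LoadProfile together let a caller discover the subscription the Azure CLI currently has selected:

package main

import (
	"fmt"
	"log"

	"github.com/Azure/go-autorest/autorest/azure/cli"
)

func main() {
	path, err := cli.ProfilePath()
	if err != nil {
		log.Fatal(err)
	}
	profile, err := cli.LoadProfile(path)
	if err != nil {
		log.Fatal(err)
	}
	for _, sub := range profile.Subscriptions {
		if sub.IsDefault {
			fmt.Printf("default subscription %s in tenant %s\n", sub.ID, sub.TenantID)
		}
	}
}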
170 vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go generated vendored Normal file
@@ -0,0 +1,170 @@
|
||||
package cli
|
||||
|
||||
// Copyright 2017 Microsoft Corporation
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/go-autorest/autorest/adal"
|
||||
"github.com/Azure/go-autorest/autorest/date"
|
||||
"github.com/mitchellh/go-homedir"
|
||||
)
|
||||
|
||||
// Token represents an AccessToken from the Azure CLI
|
||||
type Token struct {
|
||||
AccessToken string `json:"accessToken"`
|
||||
Authority string `json:"_authority"`
|
||||
ClientID string `json:"_clientId"`
|
||||
ExpiresOn string `json:"expiresOn"`
|
||||
IdentityProvider string `json:"identityProvider"`
|
||||
IsMRRT bool `json:"isMRRT"`
|
||||
RefreshToken string `json:"refreshToken"`
|
||||
Resource string `json:"resource"`
|
||||
TokenType string `json:"tokenType"`
|
||||
UserID string `json:"userId"`
|
||||
}
|
||||
|
||||
// ToADALToken converts an Azure CLI `Token`` to an `adal.Token``
|
||||
func (t Token) ToADALToken() (converted adal.Token, err error) {
|
||||
tokenExpirationDate, err := ParseExpirationDate(t.ExpiresOn)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("Error parsing Token Expiration Date %q: %+v", t.ExpiresOn, err)
|
||||
return
|
||||
}
|
||||
|
||||
difference := tokenExpirationDate.Sub(date.UnixEpoch())
|
||||
|
||||
converted = adal.Token{
|
||||
AccessToken: t.AccessToken,
|
||||
Type: t.TokenType,
|
||||
ExpiresIn: "3600",
|
||||
ExpiresOn: json.Number(strconv.Itoa(int(difference.Seconds()))),
|
||||
RefreshToken: t.RefreshToken,
|
||||
Resource: t.Resource,
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// AccessTokensPath returns the path where access tokens are stored from the Azure CLI
|
||||
// TODO(#199): add unit test.
|
||||
func AccessTokensPath() (string, error) {
|
||||
// Azure-CLI allows user to customize the path of access tokens thorugh environment variable.
|
||||
var accessTokenPath = os.Getenv("AZURE_ACCESS_TOKEN_FILE")
|
||||
var err error
|
||||
|
||||
// Fallback logic to default path on non-cloud-shell environment.
|
||||
// TODO(#200): remove the dependency on hard-coding path.
|
||||
if accessTokenPath == "" {
|
||||
accessTokenPath, err = homedir.Expand("~/.azure/accessTokens.json")
|
||||
}
|
||||
|
||||
return accessTokenPath, err
|
||||
}
|
||||
|
||||
// ParseExpirationDate parses either a Azure CLI or CloudShell date into a time object
|
||||
func ParseExpirationDate(input string) (*time.Time, error) {
|
||||
// CloudShell (and potentially the Azure CLI in future)
|
||||
expirationDate, cloudShellErr := time.Parse(time.RFC3339, input)
|
||||
if cloudShellErr != nil {
|
||||
// Azure CLI (Python) e.g. 2017-08-31 19:48:57.998857 (plus the local timezone)
|
||||
const cliFormat = "2006-01-02 15:04:05.999999"
|
||||
expirationDate, cliErr := time.ParseInLocation(cliFormat, input, time.Local)
|
||||
if cliErr == nil {
|
||||
return &expirationDate, nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("Error parsing expiration date %q.\n\nCloudShell Error: \n%+v\n\nCLI Error:\n%+v", input, cloudShellErr, cliErr)
|
||||
}
|
||||
|
||||
return &expirationDate, nil
|
||||
}
|
||||
|
||||
// LoadTokens restores a set of Token objects from a file located at 'path'.
|
||||
func LoadTokens(path string) ([]Token, error) {
|
||||
file, err := os.Open(path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to open file (%s) while loading token: %v", path, err)
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
var tokens []Token
|
||||
|
||||
dec := json.NewDecoder(file)
|
||||
if err = dec.Decode(&tokens); err != nil {
|
||||
return nil, fmt.Errorf("failed to decode contents of file (%s) into a `cli.Token` representation: %v", path, err)
|
||||
}
|
||||
|
||||
return tokens, nil
|
||||
}
|
||||
|
||||
// GetTokenFromCLI gets a token using Azure CLI 2.0 for local development scenarios.
|
||||
func GetTokenFromCLI(resource string) (*Token, error) {
|
||||
// This is the path that a developer can set to tell this class what the install path for Azure CLI is.
|
||||
const azureCLIPath = "AzureCLIPath"
|
||||
|
||||
// The default install paths are used to find Azure CLI. This is for security, so that any path in the calling program's Path environment is not used to execute Azure CLI.
|
||||
azureCLIDefaultPathWindows := fmt.Sprintf("%s\\Microsoft SDKs\\Azure\\CLI2\\wbin; %s\\Microsoft SDKs\\Azure\\CLI2\\wbin", os.Getenv("ProgramFiles(x86)"), os.Getenv("ProgramFiles"))
|
||||
|
||||
// Default path for non-Windows.
|
||||
const azureCLIDefaultPath = "/bin:/sbin:/usr/bin:/usr/local/bin"
|
||||
|
||||
// Validate resource, since it gets sent as a command line argument to Azure CLI
|
||||
const invalidResourceErrorTemplate = "Resource %s is not in expected format. Only alphanumeric characters, [dot], [colon], [hyphen], and [forward slash] are allowed."
|
||||
match, err := regexp.MatchString("^[0-9a-zA-Z-.:/]+$", resource)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !match {
|
||||
return nil, fmt.Errorf(invalidResourceErrorTemplate, resource)
|
||||
}
|
||||
|
||||
// Execute Azure CLI to get token
|
||||
var cliCmd *exec.Cmd
|
||||
if runtime.GOOS == "windows" {
|
||||
cliCmd = exec.Command(fmt.Sprintf("%s\\system32\\cmd.exe", os.Getenv("windir")))
|
||||
cliCmd.Env = os.Environ()
|
||||
cliCmd.Env = append(cliCmd.Env, fmt.Sprintf("PATH=%s;%s", os.Getenv(azureCLIPath), azureCLIDefaultPathWindows))
|
||||
cliCmd.Args = append(cliCmd.Args, "/c", "az")
|
||||
} else {
|
||||
cliCmd = exec.Command("az")
|
||||
cliCmd.Env = os.Environ()
|
||||
cliCmd.Env = append(cliCmd.Env, fmt.Sprintf("PATH=%s:%s", os.Getenv(azureCLIPath), azureCLIDefaultPath))
|
||||
}
|
||||
cliCmd.Args = append(cliCmd.Args, "account", "get-access-token", "-o", "json", "--resource", resource)
|
||||
|
||||
var stderr bytes.Buffer
|
||||
cliCmd.Stderr = &stderr
|
||||
|
||||
output, err := cliCmd.Output()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Invoking Azure CLI failed with the following error: %s", stderr.String())
|
||||
}
|
||||
|
||||
tokenResponse := Token{}
|
||||
err = json.Unmarshal(output, &tokenResponse)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &tokenResponse, err
|
||||
}
|
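A minimal sketch of the intended use of GetTokenFromCLI and ToADALToken, assuming a machine with the Azure CLI installed and logged in (the resource URL is just an example):

package main

import (
	"fmt"
	"log"

	"github.com/Azure/go-autorest/autorest/azure/cli"
)

func main() {
	// Shells out to "az account get-access-token" under the hood.
	cliToken, err := cli.GetTokenFromCLI("https://management.azure.com/")
	if err != nil {
		log.Fatal(err)
	}
	adalToken, err := cliToken.ToADALToken()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("token expires on (epoch seconds):", adalToken.ExpiresOn)
}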
105 vendor/github.com/Azure/go-autorest/autorest/azure/environments.go generated vendored
@@ -22,9 +22,14 @@ import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
// EnvironmentFilepathName captures the name of the environment variable containing the path to the file
|
||||
// to be used while populating the Azure Environment.
|
||||
const EnvironmentFilepathName = "AZURE_ENVIRONMENT_FILEPATH"
|
||||
const (
|
||||
// EnvironmentFilepathName captures the name of the environment variable containing the path to the file
|
||||
// to be used while populating the Azure Environment.
|
||||
EnvironmentFilepathName = "AZURE_ENVIRONMENT_FILEPATH"
|
||||
|
||||
// NotAvailable is used for endpoints and resource IDs that are not available for a given cloud.
|
||||
NotAvailable = "N/A"
|
||||
)
|
||||
|
||||
var environments = map[string]Environment{
|
||||
"AZURECHINACLOUD": ChinaCloud,
|
||||
@@ -33,28 +38,40 @@ var environments = map[string]Environment{
|
||||
"AZUREUSGOVERNMENTCLOUD": USGovernmentCloud,
|
||||
}
|
||||
|
||||
// ResourceIdentifier contains a set of Azure resource IDs.
|
||||
type ResourceIdentifier struct {
|
||||
Graph string `json:"graph"`
|
||||
KeyVault string `json:"keyVault"`
|
||||
Datalake string `json:"datalake"`
|
||||
Batch string `json:"batch"`
|
||||
OperationalInsights string `json:"operationalInsights"`
|
||||
Storage string `json:"storage"`
|
||||
}
|
||||
|
||||
// Environment represents a set of endpoints for each of Azure's Clouds.
|
||||
type Environment struct {
|
||||
Name string `json:"name"`
|
||||
ManagementPortalURL string `json:"managementPortalURL"`
|
||||
PublishSettingsURL string `json:"publishSettingsURL"`
|
||||
ServiceManagementEndpoint string `json:"serviceManagementEndpoint"`
|
||||
ResourceManagerEndpoint string `json:"resourceManagerEndpoint"`
|
||||
ActiveDirectoryEndpoint string `json:"activeDirectoryEndpoint"`
|
||||
GalleryEndpoint string `json:"galleryEndpoint"`
|
||||
KeyVaultEndpoint string `json:"keyVaultEndpoint"`
|
||||
GraphEndpoint string `json:"graphEndpoint"`
|
||||
ServiceBusEndpoint string `json:"serviceBusEndpoint"`
|
||||
BatchManagementEndpoint string `json:"batchManagementEndpoint"`
|
||||
StorageEndpointSuffix string `json:"storageEndpointSuffix"`
|
||||
SQLDatabaseDNSSuffix string `json:"sqlDatabaseDNSSuffix"`
|
||||
TrafficManagerDNSSuffix string `json:"trafficManagerDNSSuffix"`
|
||||
KeyVaultDNSSuffix string `json:"keyVaultDNSSuffix"`
|
||||
ServiceBusEndpointSuffix string `json:"serviceBusEndpointSuffix"`
|
||||
ServiceManagementVMDNSSuffix string `json:"serviceManagementVMDNSSuffix"`
|
||||
ResourceManagerVMDNSSuffix string `json:"resourceManagerVMDNSSuffix"`
|
||||
ContainerRegistryDNSSuffix string `json:"containerRegistryDNSSuffix"`
|
||||
TokenAudience string `json:"tokenAudience"`
|
||||
Name string `json:"name"`
|
||||
ManagementPortalURL string `json:"managementPortalURL"`
|
||||
PublishSettingsURL string `json:"publishSettingsURL"`
|
||||
ServiceManagementEndpoint string `json:"serviceManagementEndpoint"`
|
||||
ResourceManagerEndpoint string `json:"resourceManagerEndpoint"`
|
||||
ActiveDirectoryEndpoint string `json:"activeDirectoryEndpoint"`
|
||||
GalleryEndpoint string `json:"galleryEndpoint"`
|
||||
KeyVaultEndpoint string `json:"keyVaultEndpoint"`
|
||||
GraphEndpoint string `json:"graphEndpoint"`
|
||||
ServiceBusEndpoint string `json:"serviceBusEndpoint"`
|
||||
BatchManagementEndpoint string `json:"batchManagementEndpoint"`
|
||||
StorageEndpointSuffix string `json:"storageEndpointSuffix"`
|
||||
SQLDatabaseDNSSuffix string `json:"sqlDatabaseDNSSuffix"`
|
||||
TrafficManagerDNSSuffix string `json:"trafficManagerDNSSuffix"`
|
||||
KeyVaultDNSSuffix string `json:"keyVaultDNSSuffix"`
|
||||
ServiceBusEndpointSuffix string `json:"serviceBusEndpointSuffix"`
|
||||
ServiceManagementVMDNSSuffix string `json:"serviceManagementVMDNSSuffix"`
|
||||
ResourceManagerVMDNSSuffix string `json:"resourceManagerVMDNSSuffix"`
|
||||
ContainerRegistryDNSSuffix string `json:"containerRegistryDNSSuffix"`
|
||||
CosmosDBDNSSuffix string `json:"cosmosDBDNSSuffix"`
|
||||
TokenAudience string `json:"tokenAudience"`
|
||||
ResourceIdentifiers ResourceIdentifier `json:"resourceIdentifiers"`
|
||||
}
|
||||
|
||||
var (
|
||||
@@ -79,7 +96,16 @@ var (
|
||||
ServiceManagementVMDNSSuffix: "cloudapp.net",
|
||||
ResourceManagerVMDNSSuffix: "cloudapp.azure.com",
|
||||
ContainerRegistryDNSSuffix: "azurecr.io",
|
||||
CosmosDBDNSSuffix: "documents.azure.com",
|
||||
TokenAudience: "https://management.azure.com/",
|
||||
ResourceIdentifiers: ResourceIdentifier{
|
||||
Graph: "https://graph.windows.net/",
|
||||
KeyVault: "https://vault.azure.net",
|
||||
Datalake: "https://datalake.azure.net/",
|
||||
Batch: "https://batch.core.windows.net/",
|
||||
OperationalInsights: "https://api.loganalytics.io",
|
||||
Storage: "https://storage.azure.com/",
|
||||
},
|
||||
}
|
||||
|
||||
// USGovernmentCloud is the cloud environment for the US Government
|
||||
@@ -102,8 +128,17 @@ var (
|
||||
ServiceBusEndpointSuffix: "servicebus.usgovcloudapi.net",
|
||||
ServiceManagementVMDNSSuffix: "usgovcloudapp.net",
|
||||
ResourceManagerVMDNSSuffix: "cloudapp.windowsazure.us",
|
||||
ContainerRegistryDNSSuffix: "azurecr.io",
|
||||
ContainerRegistryDNSSuffix: "azurecr.us",
|
||||
CosmosDBDNSSuffix: "documents.azure.us",
|
||||
TokenAudience: "https://management.usgovcloudapi.net/",
|
||||
ResourceIdentifiers: ResourceIdentifier{
|
||||
Graph: "https://graph.windows.net/",
|
||||
KeyVault: "https://vault.usgovcloudapi.net",
|
||||
Datalake: NotAvailable,
|
||||
Batch: "https://batch.core.usgovcloudapi.net/",
|
||||
OperationalInsights: "https://api.loganalytics.us",
|
||||
Storage: "https://storage.azure.com/",
|
||||
},
|
||||
}
|
||||
|
||||
// ChinaCloud is the cloud environment operated in China
|
||||
@@ -126,8 +161,17 @@ var (
|
||||
ServiceBusEndpointSuffix: "servicebus.chinacloudapi.cn",
|
||||
ServiceManagementVMDNSSuffix: "chinacloudapp.cn",
|
||||
ResourceManagerVMDNSSuffix: "cloudapp.azure.cn",
|
||||
ContainerRegistryDNSSuffix: "azurecr.io",
|
||||
ContainerRegistryDNSSuffix: "azurecr.cn",
|
||||
CosmosDBDNSSuffix: "documents.azure.cn",
|
||||
TokenAudience: "https://management.chinacloudapi.cn/",
|
||||
ResourceIdentifiers: ResourceIdentifier{
|
||||
Graph: "https://graph.chinacloudapi.cn/",
|
||||
KeyVault: "https://vault.azure.cn",
|
||||
Datalake: NotAvailable,
|
||||
Batch: "https://batch.chinacloudapi.cn/",
|
||||
OperationalInsights: NotAvailable,
|
||||
Storage: "https://storage.azure.com/",
|
||||
},
|
||||
}
|
||||
|
||||
// GermanCloud is the cloud environment operated in Germany
|
||||
@@ -150,8 +194,17 @@ var (
|
||||
ServiceBusEndpointSuffix: "servicebus.cloudapi.de",
|
||||
ServiceManagementVMDNSSuffix: "azurecloudapp.de",
|
||||
ResourceManagerVMDNSSuffix: "cloudapp.microsoftazure.de",
|
||||
ContainerRegistryDNSSuffix: "azurecr.io",
|
||||
ContainerRegistryDNSSuffix: NotAvailable,
|
||||
CosmosDBDNSSuffix: "documents.microsoftazure.de",
|
||||
TokenAudience: "https://management.microsoftazure.de/",
|
||||
ResourceIdentifiers: ResourceIdentifier{
|
||||
Graph: "https://graph.cloudapi.de/",
|
||||
KeyVault: "https://vault.microsoftazure.de",
|
||||
Datalake: NotAvailable,
|
||||
Batch: "https://batch.cloudapi.de/",
|
||||
OperationalInsights: NotAvailable,
|
||||
Storage: "https://storage.azure.com/",
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
|
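To illustrate the new fields (a sketch, not taken from the diff): each predefined Environment now exposes CosmosDBDNSSuffix and a ResourceIdentifiers block, with NotAvailable ("N/A") marking endpoints a given cloud does not offer.

package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest/azure"
)

func main() {
	env := azure.USGovernmentCloud
	fmt.Println("Cosmos DB suffix:   ", env.CosmosDBDNSSuffix)
	fmt.Println("Key Vault audience: ", env.ResourceIdentifiers.KeyVault)
	// Data Lake is not offered in this cloud, so the identifier is "N/A".
	fmt.Println("Data Lake audience: ", env.ResourceIdentifiers.Datalake)
}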
8 vendor/github.com/Azure/go-autorest/autorest/azure/rp.go generated vendored
@@ -64,7 +64,7 @@ func DoRetryWithRegistration(client autorest.Client) autorest.SendDecorator {
|
||||
}
|
||||
}
|
||||
}
|
||||
return resp, fmt.Errorf("failed request: %s", err)
|
||||
return resp, err
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -140,8 +140,8 @@ func register(client autorest.Client, originalReq *http.Request, re RequestError
|
||||
}
|
||||
|
||||
// poll for registered provisioning state
|
||||
now := time.Now()
|
||||
for err == nil && time.Since(now) < client.PollingDuration {
|
||||
registrationStartTime := time.Now()
|
||||
for err == nil && (client.PollingDuration == 0 || (client.PollingDuration != 0 && time.Since(registrationStartTime) < client.PollingDuration)) {
|
||||
// taken from the resources SDK
|
||||
// https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L45
|
||||
preparer := autorest.CreatePreparer(
|
||||
@@ -183,7 +183,7 @@ func register(client autorest.Client, originalReq *http.Request, re RequestError
|
||||
return originalReq.Context().Err()
|
||||
}
|
||||
}
|
||||
if !(time.Since(now) < client.PollingDuration) {
|
||||
if client.PollingDuration != 0 && !(time.Since(registrationStartTime) < client.PollingDuration) {
|
||||
return errors.New("polling for resource provider registration has exceeded the polling duration")
|
||||
}
|
||||
return err
|
||||
|
92 vendor/github.com/Azure/go-autorest/autorest/client.go generated vendored
@@ -16,14 +16,18 @@ package autorest
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/http/cookiejar"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/go-autorest/logger"
|
||||
"github.com/Azure/go-autorest/tracing"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -41,15 +45,6 @@ const (
|
||||
)
|
||||
|
||||
var (
|
||||
// defaultUserAgent builds a string containing the Go version, system archityecture and OS,
|
||||
// and the go-autorest version.
|
||||
defaultUserAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s",
|
||||
runtime.Version(),
|
||||
runtime.GOARCH,
|
||||
runtime.GOOS,
|
||||
Version(),
|
||||
)
|
||||
|
||||
// StatusCodesForRetry are a defined group of status code for which the client will retry
|
||||
StatusCodesForRetry = []int{
|
||||
http.StatusRequestTimeout, // 408
|
||||
@@ -78,6 +73,22 @@ type Response struct {
|
||||
*http.Response `json:"-"`
|
||||
}
|
||||
|
||||
// IsHTTPStatus returns true if the returned HTTP status code matches the provided status code.
|
||||
// If there was no response (i.e. the underlying http.Response is nil) the return value is false.
|
||||
func (r Response) IsHTTPStatus(statusCode int) bool {
|
||||
if r.Response == nil {
|
||||
return false
|
||||
}
|
||||
return r.Response.StatusCode == statusCode
|
||||
}
|
||||
|
||||
// HasHTTPStatus returns true if the returned HTTP status code matches one of the provided status codes.
|
||||
// If there was no response (i.e. the underlying http.Response is nil) or not status codes are provided
|
||||
// the return value is false.
|
||||
func (r Response) HasHTTPStatus(statusCodes ...int) bool {
|
||||
return ResponseHasStatusCode(r.Response, statusCodes...)
|
||||
}
|
||||
|
||||
// LoggingInspector implements request and response inspectors that log the full request and
|
||||
// response to a supplied log.
|
||||
type LoggingInspector struct {
|
||||
@@ -153,6 +164,7 @@ type Client struct {
|
||||
PollingDelay time.Duration
|
||||
|
||||
// PollingDuration sets the maximum polling time after which an error is returned.
|
||||
// Setting this to zero will use the provided context to control the duration.
|
||||
PollingDuration time.Duration
|
||||
|
||||
// RetryAttempts sets the default number of retry attempts for client.
|
||||
@@ -174,14 +186,32 @@ type Client struct {
|
||||
// NewClientWithUserAgent returns an instance of a Client with the UserAgent set to the passed
|
||||
// string.
|
||||
func NewClientWithUserAgent(ua string) Client {
|
||||
return newClient(ua, tls.RenegotiateNever)
|
||||
}
|
||||
|
||||
// ClientOptions contains various Client configuration options.
|
||||
type ClientOptions struct {
|
||||
// UserAgent is an optional user-agent string to append to the default user agent.
|
||||
UserAgent string
|
||||
|
||||
// Renegotiation is an optional setting to control client-side TLS renegotiation.
|
||||
Renegotiation tls.RenegotiationSupport
|
||||
}
|
||||
|
||||
// NewClientWithOptions returns an instance of a Client with the specified values.
|
||||
func NewClientWithOptions(options ClientOptions) Client {
|
||||
return newClient(options.UserAgent, options.Renegotiation)
|
||||
}
|
||||
|
||||
func newClient(ua string, renegotiation tls.RenegotiationSupport) Client {
|
||||
c := Client{
|
||||
PollingDelay: DefaultPollingDelay,
|
||||
PollingDuration: DefaultPollingDuration,
|
||||
RetryAttempts: DefaultRetryAttempts,
|
||||
RetryDuration: DefaultRetryDuration,
|
||||
UserAgent: defaultUserAgent,
|
||||
UserAgent: UserAgent(),
|
||||
}
|
||||
c.Sender = c.sender()
|
||||
c.Sender = c.sender(renegotiation)
|
||||
c.AddToUserAgent(ua)
|
||||
return c
|
||||
}
|
||||
@@ -216,18 +246,48 @@ func (c Client) Do(r *http.Request) (*http.Response, error) {
|
||||
}
|
||||
return resp, NewErrorWithError(err, "autorest/Client", "Do", nil, "Preparing request failed")
|
||||
}
|
||||
|
||||
resp, err := SendWithSender(c.sender(), r)
|
||||
logger.Instance.WriteRequest(r, logger.Filter{
|
||||
Header: func(k string, v []string) (bool, []string) {
|
||||
// remove the auth token from the log
|
||||
if strings.EqualFold(k, "Authorization") || strings.EqualFold(k, "Ocp-Apim-Subscription-Key") {
|
||||
v = []string{"**REDACTED**"}
|
||||
}
|
||||
return true, v
|
||||
},
|
||||
})
|
||||
resp, err := SendWithSender(c.sender(tls.RenegotiateNever), r)
|
||||
logger.Instance.WriteResponse(resp, logger.Filter{})
|
||||
Respond(resp, c.ByInspecting())
|
||||
return resp, err
|
||||
}
|
||||
|
||||
// sender returns the Sender to which to send requests.
|
||||
func (c Client) sender() Sender {
|
||||
func (c Client) sender(renengotiation tls.RenegotiationSupport) Sender {
|
||||
if c.Sender == nil {
|
||||
// Use behaviour compatible with DefaultTransport, but require TLS minimum version.
|
||||
var defaultTransport = http.DefaultTransport.(*http.Transport)
|
||||
transport := tracing.Transport
|
||||
// for non-default values of TLS renegotiation create a new tracing transport.
|
||||
// updating tracing.Transport affects all clients which is not what we want.
|
||||
if renengotiation != tls.RenegotiateNever {
|
||||
transport = tracing.NewTransport()
|
||||
}
|
||||
transport.Base = &http.Transport{
|
||||
Proxy: defaultTransport.Proxy,
|
||||
DialContext: defaultTransport.DialContext,
|
||||
MaxIdleConns: defaultTransport.MaxIdleConns,
|
||||
IdleConnTimeout: defaultTransport.IdleConnTimeout,
|
||||
TLSHandshakeTimeout: defaultTransport.TLSHandshakeTimeout,
|
||||
ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout,
|
||||
TLSClientConfig: &tls.Config{
|
||||
MinVersion: tls.VersionTLS12,
|
||||
Renegotiation: renengotiation,
|
||||
},
|
||||
}
|
||||
j, _ := cookiejar.New(nil)
|
||||
return &http.Client{Jar: j}
|
||||
return &http.Client{Jar: j, Transport: transport}
|
||||
}
|
||||
|
||||
return c.Sender
|
||||
}
|
||||
|
||||
|
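A sketch of the new constructor (the user agent string is arbitrary): NewClientWithOptions is the only way to get a client whose transport allows TLS renegotiation, since NewClientWithUserAgent stays pinned to RenegotiateNever.

package main

import (
	"crypto/tls"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	c := autorest.NewClientWithOptions(autorest.ClientOptions{
		UserAgent:     "my-tool/1.0",
		Renegotiation: tls.RenegotiateOnceAsClient, // opt in per client, not globally
	})
	_ = c
}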
74 vendor/github.com/Azure/go-autorest/autorest/preparer.go generated vendored
@@ -16,7 +16,9 @@ package autorest
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
@@ -31,11 +33,33 @@ const (
|
||||
mimeTypeOctetStream = "application/octet-stream"
|
||||
mimeTypeFormPost = "application/x-www-form-urlencoded"
|
||||
|
||||
headerAuthorization = "Authorization"
|
||||
headerContentType = "Content-Type"
|
||||
headerUserAgent = "User-Agent"
|
||||
headerAuthorization = "Authorization"
|
||||
headerAuxAuthorization = "x-ms-authorization-auxiliary"
|
||||
headerContentType = "Content-Type"
|
||||
headerUserAgent = "User-Agent"
|
||||
)
|
||||
|
||||
// used as a key type in context.WithValue()
|
||||
type ctxPrepareDecorators struct{}
|
||||
|
||||
// WithPrepareDecorators adds the specified PrepareDecorators to the provided context.
|
||||
// If no PrepareDecorators are provided the context is unchanged.
|
||||
func WithPrepareDecorators(ctx context.Context, prepareDecorator []PrepareDecorator) context.Context {
|
||||
if len(prepareDecorator) == 0 {
|
||||
return ctx
|
||||
}
|
||||
return context.WithValue(ctx, ctxPrepareDecorators{}, prepareDecorator)
|
||||
}
|
||||
|
||||
// GetPrepareDecorators returns the PrepareDecorators in the provided context or the provided default PrepareDecorators.
|
||||
func GetPrepareDecorators(ctx context.Context, defaultPrepareDecorators ...PrepareDecorator) []PrepareDecorator {
|
||||
inCtx := ctx.Value(ctxPrepareDecorators{})
|
||||
if pd, ok := inCtx.([]PrepareDecorator); ok {
|
||||
return pd
|
||||
}
|
||||
return defaultPrepareDecorators
|
||||
}
|
||||
|
||||
// Preparer is the interface that wraps the Prepare method.
|
||||
//
|
||||
// Prepare accepts and possibly modifies an http.Request (e.g., adding Headers). Implementations
|
||||
@@ -190,6 +214,9 @@ func AsGet() PrepareDecorator { return WithMethod("GET") }
|
||||
// AsHead returns a PrepareDecorator that sets the HTTP method to HEAD.
|
||||
func AsHead() PrepareDecorator { return WithMethod("HEAD") }
|
||||
|
||||
// AsMerge returns a PrepareDecorator that sets the HTTP method to MERGE.
|
||||
func AsMerge() PrepareDecorator { return WithMethod("MERGE") }
|
||||
|
||||
// AsOptions returns a PrepareDecorator that sets the HTTP method to OPTIONS.
|
||||
func AsOptions() PrepareDecorator { return WithMethod("OPTIONS") }
|
||||
|
||||
@@ -225,6 +252,25 @@ func WithBaseURL(baseURL string) PrepareDecorator {
|
||||
}
|
||||
}
|
||||
|
||||
// WithBytes returns a PrepareDecorator that takes a list of bytes
|
||||
// which passes the bytes directly to the body
|
||||
func WithBytes(input *[]byte) PrepareDecorator {
|
||||
return func(p Preparer) Preparer {
|
||||
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
|
||||
r, err := p.Prepare(r)
|
||||
if err == nil {
|
||||
if input == nil {
|
||||
return r, fmt.Errorf("Input Bytes was nil")
|
||||
}
|
||||
|
||||
r.ContentLength = int64(len(*input))
|
||||
r.Body = ioutil.NopCloser(bytes.NewReader(*input))
|
||||
}
|
||||
return r, err
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// WithCustomBaseURL returns a PrepareDecorator that replaces brace-enclosed keys within the
|
||||
// request base URL (i.e., http.Request.URL) with the corresponding values from the passed map.
|
||||
func WithCustomBaseURL(baseURL string, urlParameters map[string]interface{}) PrepareDecorator {
|
||||
@@ -377,6 +423,28 @@ func WithJSON(v interface{}) PrepareDecorator {
|
||||
}
|
||||
}
|
||||
|
||||
// WithXML returns a PrepareDecorator that encodes the data passed as XML into the body of the
|
||||
// request and sets the Content-Length header.
|
||||
func WithXML(v interface{}) PrepareDecorator {
|
||||
return func(p Preparer) Preparer {
|
||||
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
|
||||
r, err := p.Prepare(r)
|
||||
if err == nil {
|
||||
b, err := xml.Marshal(v)
|
||||
if err == nil {
|
||||
// we have to tack on an XML header
|
||||
withHeader := xml.Header + string(b)
|
||||
bytesWithHeader := []byte(withHeader)
|
||||
|
||||
r.ContentLength = int64(len(bytesWithHeader))
|
||||
r.Body = ioutil.NopCloser(bytes.NewReader(bytesWithHeader))
|
||||
}
|
||||
}
|
||||
return r, err
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// WithPath returns a PrepareDecorator that adds the supplied path to the request URL. If the path
|
||||
// is absolute (that is, it begins with a "/"), it replaces the existing path.
|
||||
func WithPath(path string) PrepareDecorator {
|
||||
|
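A rough sketch of the new WithXML decorator in a prepared request; the base URL, path, and AsPut decorator here are illustrative rather than taken from this hunk:

package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

type widget struct {
	Name string `xml:"name"`
}

func main() {
	req, err := autorest.CreatePreparer(
		autorest.AsPut(),
		autorest.WithBaseURL("https://example.invalid"),
		autorest.WithPath("/widgets/1"),
		autorest.WithXML(widget{Name: "demo"}), // body gets the xml.Header prefix
	).Prepare(&http.Request{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(req.Method, req.URL, "Content-Length:", req.ContentLength)
}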
19 vendor/github.com/Azure/go-autorest/autorest/responder.go generated vendored
@@ -153,6 +153,25 @@ func ByClosingIfError() RespondDecorator {
|
||||
}
|
||||
}
|
||||
|
||||
// ByUnmarshallingBytes returns a RespondDecorator that copies the Bytes returned in the
|
||||
// response Body into the value pointed to by v.
|
||||
func ByUnmarshallingBytes(v *[]byte) RespondDecorator {
|
||||
return func(r Responder) Responder {
|
||||
return ResponderFunc(func(resp *http.Response) error {
|
||||
err := r.Respond(resp)
|
||||
if err == nil {
|
||||
bytes, errInner := ioutil.ReadAll(resp.Body)
|
||||
if errInner != nil {
|
||||
err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner)
|
||||
} else {
|
||||
*v = bytes
|
||||
}
|
||||
}
|
||||
return err
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// ByUnmarshallingJSON returns a RespondDecorator that decodes a JSON document returned in the
|
||||
// response Body into the value pointed to by v.
|
||||
func ByUnmarshallingJSON(v interface{}) RespondDecorator {
|
||||
|
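A sketch of ByUnmarshallingBytes in a typical Respond chain; the URL is a placeholder and the surrounding decorators are standard autorest ones, not part of this hunk:

package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	resp, err := http.Get("https://example.invalid/blob")
	if err != nil {
		log.Fatal(err)
	}
	var raw []byte
	err = autorest.Respond(resp,
		autorest.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingBytes(&raw), // copies the body instead of JSON-decoding it
		autorest.ByClosing(),
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("read", len(raw), "bytes")
}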
137 vendor/github.com/Azure/go-autorest/autorest/sender.go generated vendored
@@ -15,14 +15,38 @@ package autorest
|
||||
// limitations under the License.
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"math"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/go-autorest/tracing"
|
||||
)
|
||||
|
||||
// used as a key type in context.WithValue()
|
||||
type ctxSendDecorators struct{}
|
||||
|
||||
// WithSendDecorators adds the specified SendDecorators to the provided context.
|
||||
// If no SendDecorators are provided the context is unchanged.
|
||||
func WithSendDecorators(ctx context.Context, sendDecorator []SendDecorator) context.Context {
|
||||
if len(sendDecorator) == 0 {
|
||||
return ctx
|
||||
}
|
||||
return context.WithValue(ctx, ctxSendDecorators{}, sendDecorator)
|
||||
}
|
||||
|
||||
// GetSendDecorators returns the SendDecorators in the provided context or the provided default SendDecorators.
|
||||
func GetSendDecorators(ctx context.Context, defaultSendDecorators ...SendDecorator) []SendDecorator {
|
||||
inCtx := ctx.Value(ctxSendDecorators{})
|
||||
if sd, ok := inCtx.([]SendDecorator); ok {
|
||||
return sd
|
||||
}
|
||||
return defaultSendDecorators
|
||||
}
|
||||
|
||||
// Sender is the interface that wraps the Do method to send HTTP requests.
|
||||
//
|
||||
// The standard http.Client conforms to this interface.
|
||||
@@ -38,7 +62,7 @@ func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) {
|
||||
return sf(r)
|
||||
}
|
||||
|
||||
// SendDecorator takes and possibily decorates, by wrapping, a Sender. Decorators may affect the
|
||||
// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the
|
||||
// http.Request and pass it along or, first, pass the http.Request along then react to the
|
||||
// http.Response result.
|
||||
type SendDecorator func(Sender) Sender
|
||||
@@ -68,7 +92,7 @@ func DecorateSender(s Sender, decorators ...SendDecorator) Sender {
|
||||
//
|
||||
// Send will not poll or retry requests.
|
||||
func Send(r *http.Request, decorators ...SendDecorator) (*http.Response, error) {
|
||||
return SendWithSender(&http.Client{}, r, decorators...)
|
||||
return SendWithSender(&http.Client{Transport: tracing.Transport}, r, decorators...)
|
||||
}
|
||||
|
||||
// SendWithSender sends the passed http.Request, through the provided Sender, returning the
|
||||
@@ -209,50 +233,77 @@ func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator {
|
||||
|
||||
// DoRetryForStatusCodes returns a SendDecorator that retries for specified statusCodes for up to the specified
|
||||
// number of attempts, exponentially backing off between requests using the supplied backoff
|
||||
// time.Duration (which may be zero). Retrying may be canceled by closing the optional channel on
|
||||
// the http.Request.
|
||||
// time.Duration (which may be zero). Retrying may be canceled by cancelling the context on the http.Request.
|
||||
// NOTE: Code http.StatusTooManyRequests (429) will *not* be counted against the number of attempts.
|
||||
func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) SendDecorator {
|
||||
return func(s Sender) Sender {
|
||||
return SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
|
||||
rr := NewRetriableRequest(r)
|
||||
// Increment to add the first call (attempts denotes number of retries)
|
||||
attempts++
|
||||
for attempt := 0; attempt < attempts; {
|
||||
err = rr.Prepare()
|
||||
if err != nil {
|
||||
return resp, err
|
||||
}
|
||||
resp, err = s.Do(rr.Request())
|
||||
// we want to retry if err is not nil (e.g. transient network failure). note that for failed authentication
|
||||
// resp and err will both have a value, so in this case we don't want to retry as it will never succeed.
|
||||
if err == nil && !ResponseHasStatusCode(resp, codes...) || IsTokenRefreshError(err) {
|
||||
return resp, err
|
||||
}
|
||||
delayed := DelayWithRetryAfter(resp, r.Context().Done())
|
||||
if !delayed && !DelayForBackoff(backoff, attempt, r.Context().Done()) {
|
||||
return nil, r.Context().Err()
|
||||
}
|
||||
// don't count a 429 against the number of attempts
|
||||
// so that we continue to retry until it succeeds
|
||||
if resp == nil || resp.StatusCode != http.StatusTooManyRequests {
|
||||
attempt++
|
||||
}
|
||||
}
|
||||
return resp, err
|
||||
return SenderFunc(func(r *http.Request) (*http.Response, error) {
|
||||
return doRetryForStatusCodesImpl(s, r, false, attempts, backoff, 0, codes...)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// DelayWithRetryAfter invokes time.After for the duration specified in the "Retry-After" header in
|
||||
// responses with status code 429
|
||||
// DoRetryForStatusCodesWithCap returns a SendDecorator that retries for specified statusCodes for up to the
|
||||
// specified number of attempts, exponentially backing off between requests using the supplied backoff
|
||||
// time.Duration (which may be zero). To cap the maximum possible delay between iterations specify a value greater
|
||||
// than zero for cap. Retrying may be canceled by cancelling the context on the http.Request.
|
||||
func DoRetryForStatusCodesWithCap(attempts int, backoff, cap time.Duration, codes ...int) SendDecorator {
|
||||
return func(s Sender) Sender {
|
||||
return SenderFunc(func(r *http.Request) (*http.Response, error) {
|
||||
return doRetryForStatusCodesImpl(s, r, true, attempts, backoff, cap, codes...)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func doRetryForStatusCodesImpl(s Sender, r *http.Request, count429 bool, attempts int, backoff, cap time.Duration, codes ...int) (resp *http.Response, err error) {
|
||||
rr := NewRetriableRequest(r)
|
||||
// Increment to add the first call (attempts denotes number of retries)
|
||||
for attempt := 0; attempt < attempts+1; {
|
||||
err = rr.Prepare()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
resp, err = s.Do(rr.Request())
|
||||
// if the error isn't temporary don't bother retrying
|
||||
if err != nil && !IsTemporaryNetworkError(err) {
|
||||
return
|
||||
}
|
||||
// we want to retry if err is not nil (e.g. transient network failure). note that for failed authentication
|
||||
// resp and err will both have a value, so in this case we don't want to retry as it will never succeed.
|
||||
if err == nil && !ResponseHasStatusCode(resp, codes...) || IsTokenRefreshError(err) {
|
||||
return resp, err
|
||||
}
|
||||
delayed := DelayWithRetryAfter(resp, r.Context().Done())
|
||||
if !delayed && !DelayForBackoffWithCap(backoff, cap, attempt, r.Context().Done()) {
|
||||
return resp, r.Context().Err()
|
||||
}
|
||||
// when count429 == false don't count a 429 against the number
|
||||
// of attempts so that we continue to retry until it succeeds
|
||||
if count429 || (resp == nil || resp.StatusCode != http.StatusTooManyRequests) {
|
||||
attempt++
|
||||
}
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
// DelayWithRetryAfter invokes time.After for the duration specified in the "Retry-After" header.
|
||||
// The value of Retry-After can be either the number of seconds or a date in RFC1123 format.
|
||||
// The function returns true after successfully waiting for the specified duration. If there is
|
||||
// no Retry-After header or the wait is cancelled the return value is false.
|
||||
func DelayWithRetryAfter(resp *http.Response, cancel <-chan struct{}) bool {
|
||||
if resp == nil {
|
||||
return false
|
||||
}
|
||||
retryAfter, _ := strconv.Atoi(resp.Header.Get("Retry-After"))
|
||||
if resp.StatusCode == http.StatusTooManyRequests && retryAfter > 0 {
|
||||
var dur time.Duration
|
||||
ra := resp.Header.Get("Retry-After")
|
||||
if retryAfter, _ := strconv.Atoi(ra); retryAfter > 0 {
|
||||
dur = time.Duration(retryAfter) * time.Second
|
||||
} else if t, err := time.Parse(time.RFC1123, ra); err == nil {
|
||||
dur = t.Sub(time.Now())
|
||||
}
|
||||
if dur > 0 {
|
||||
select {
|
||||
case <-time.After(time.Duration(retryAfter) * time.Second):
|
||||
case <-time.After(dur):
|
||||
return true
|
||||
case <-cancel:
|
||||
return false
|
||||
@@ -312,8 +363,22 @@ func WithLogging(logger *log.Logger) SendDecorator {
|
||||
// Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based attempt
|
||||
// count.
|
||||
func DelayForBackoff(backoff time.Duration, attempt int, cancel <-chan struct{}) bool {
|
||||
return DelayForBackoffWithCap(backoff, 0, attempt, cancel)
|
||||
}
|
||||
|
||||
// DelayForBackoffWithCap invokes time.After for the supplied backoff duration raised to the power of
|
||||
// passed attempt (i.e., an exponential backoff delay). Backoff duration is in seconds and can set
|
||||
// to zero for no delay. To cap the maximum possible delay specify a value greater than zero for cap.
|
||||
// The delay may be canceled by closing the passed channel. If terminated early, returns false.
|
||||
// Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based attempt
|
||||
// count.
|
||||
func DelayForBackoffWithCap(backoff, cap time.Duration, attempt int, cancel <-chan struct{}) bool {
|
||||
d := time.Duration(backoff.Seconds()*math.Pow(2, float64(attempt))) * time.Second
|
||||
if cap > 0 && d > cap {
|
||||
d = cap
|
||||
}
|
||||
select {
|
||||
case <-time.After(time.Duration(backoff.Seconds()*math.Pow(2, float64(attempt))) * time.Second):
|
||||
case <-time.After(d):
|
||||
return true
|
||||
case <-cancel:
|
||||
return false
|
||||
|
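A sketch of the capped retry decorator (status codes and durations are illustrative): DoRetryForStatusCodesWithCap behaves like DoRetryForStatusCodes but never backs off longer than the cap.

package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	sender := autorest.DecorateSender(&http.Client{},
		autorest.DoRetryForStatusCodesWithCap(3, 2*time.Second, 30*time.Second,
			http.StatusInternalServerError, http.StatusBadGateway, http.StatusServiceUnavailable),
	)
	req, err := http.NewRequest(http.MethodGet, "https://example.invalid/health", nil)
	if err != nil {
		panic(err)
	}
	resp, err := sender.Do(req)
	fmt.Println(resp, err)
}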
5 vendor/github.com/Azure/go-autorest/autorest/to/convert.go generated vendored
@@ -145,3 +145,8 @@ func Float64(i *float64) float64 {
|
||||
func Float64Ptr(i float64) *float64 {
|
||||
return &i
|
||||
}
|
||||
|
||||
// ByteSlicePtr returns a pointer to the passed byte slice.
|
||||
func ByteSlicePtr(b []byte) *[]byte {
|
||||
return &b
|
||||
}
|
||||
|
12 vendor/github.com/Azure/go-autorest/autorest/utility.go generated vendored
@@ -20,6 +20,7 @@ import (
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"reflect"
|
||||
@@ -156,7 +157,7 @@ func AsStringSlice(s interface{}) ([]string, error) {
|
||||
}
|
||||
|
||||
// String method converts interface v to string. If interface is a list, it
|
||||
// joins list elements using the seperator. Note that only sep[0] will be used for
|
||||
// joins list elements using the separator. Note that only sep[0] will be used for
|
||||
// joining if any separator is specified.
|
||||
func String(v interface{}, sep ...string) string {
|
||||
if len(sep) == 0 {
|
||||
@@ -216,3 +217,12 @@ func IsTokenRefreshError(err error) bool {
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// IsTemporaryNetworkError returns true if the specified error is a temporary network error or false
|
||||
// if it's not. If the error doesn't implement the net.Error interface the return value is true.
|
||||
func IsTemporaryNetworkError(err error) bool {
|
||||
if netErr, ok := err.(net.Error); !ok || (ok && netErr.Temporary()) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
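For context, a tiny sketch of the new helper (placeholder URL): IsTemporaryNetworkError is what the retry path in sender.go above uses to decide whether a transport error is worth retrying.

package main

import (
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	_, err := http.Get("https://example.invalid")
	if err != nil {
		fmt.Println("temporary network error:", autorest.IsTemporaryNetworkError(err))
	}
}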
48 vendor/github.com/Azure/go-autorest/autorest/validation/error.go generated vendored Normal file
@@ -0,0 +1,48 @@
|
||||
package validation
|
||||
|
||||
// Copyright 2017 Microsoft Corporation
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Error is the type that's returned when the validation of an APIs arguments constraints fails.
|
||||
type Error struct {
|
||||
// PackageType is the package type of the object emitting the error. For types, the value
|
||||
// matches that produced the the '%T' format specifier of the fmt package. For other elements,
|
||||
// such as functions, it is just the package name (e.g., "autorest").
|
||||
PackageType string
|
||||
|
||||
// Method is the name of the method raising the error.
|
||||
Method string
|
||||
|
||||
// Message is the error message.
|
||||
Message string
|
||||
}
|
||||
|
||||
// Error returns a string containing the details of the validation failure.
|
||||
func (e Error) Error() string {
|
||||
return fmt.Sprintf("%s#%s: Invalid input: %s", e.PackageType, e.Method, e.Message)
|
||||
}
|
||||
|
||||
// NewError creates a new Error object with the specified parameters.
|
||||
// message is treated as a format string to which the optional args apply.
|
||||
func NewError(packageType string, method string, message string, args ...interface{}) Error {
|
||||
return Error{
|
||||
PackageType: packageType,
|
||||
Method: method,
|
||||
Message: fmt.Sprintf(message, args...),
|
||||
}
|
||||
}
|
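A short sketch of how the validation Error type is meant to be constructed and printed; the package type and method names here are placeholders:

package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest/validation"
)

func main() {
	err := validation.NewError("example.WidgetClient", "Get", "parameter %q cannot be empty", "name")
	// Prints: example.WidgetClient#Get: Invalid input: parameter "name" cannot be empty
	fmt.Println(err)
}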
400 vendor/github.com/Azure/go-autorest/autorest/validation/validation.go generated vendored Normal file
@@ -0,0 +1,400 @@
|
||||
/*
|
||||
Package validation provides methods for validating parameter value using reflection.
|
||||
*/
|
||||
package validation
|
||||
|
||||
// Copyright 2017 Microsoft Corporation
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Constraint stores constraint name, target field name
|
||||
// Rule and chain validations.
|
||||
type Constraint struct {
|
||||
|
||||
// Target field name for validation.
|
||||
Target string
|
||||
|
||||
// Constraint name e.g. minLength, MaxLength, Pattern, etc.
|
||||
Name string
|
||||
|
||||
// Rule for constraint e.g. greater than 10, less than 5 etc.
|
||||
Rule interface{}
|
||||
|
||||
// Chain Validations for struct type
|
||||
Chain []Constraint
|
||||
}
|
||||
|
||||
// Validation stores parameter-wise validation.
|
||||
type Validation struct {
|
||||
TargetValue interface{}
|
||||
Constraints []Constraint
|
||||
}
|
||||
|
||||
// Constraint list
|
||||
const (
|
||||
Empty = "Empty"
|
||||
Null = "Null"
|
||||
ReadOnly = "ReadOnly"
|
||||
Pattern = "Pattern"
|
||||
MaxLength = "MaxLength"
|
||||
MinLength = "MinLength"
|
||||
MaxItems = "MaxItems"
|
||||
MinItems = "MinItems"
|
||||
MultipleOf = "MultipleOf"
|
||||
UniqueItems = "UniqueItems"
|
||||
InclusiveMaximum = "InclusiveMaximum"
|
||||
ExclusiveMaximum = "ExclusiveMaximum"
|
||||
ExclusiveMinimum = "ExclusiveMinimum"
|
||||
InclusiveMinimum = "InclusiveMinimum"
|
||||
)
|
||||
|
||||
// Validate method validates constraints on parameter
|
||||
// passed in validation array.
|
||||
func Validate(m []Validation) error {
|
||||
for _, item := range m {
|
||||
v := reflect.ValueOf(item.TargetValue)
|
||||
for _, constraint := range item.Constraints {
|
||||
var err error
|
||||
switch v.Kind() {
|
||||
case reflect.Ptr:
|
||||
err = validatePtr(v, constraint)
|
||||
case reflect.String:
|
||||
err = validateString(v, constraint)
|
||||
case reflect.Struct:
|
||||
err = validateStruct(v, constraint)
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
err = validateInt(v, constraint)
|
||||
case reflect.Float32, reflect.Float64:
|
||||
err = validateFloat(v, constraint)
|
||||
case reflect.Array, reflect.Slice, reflect.Map:
|
||||
err = validateArrayMap(v, constraint)
|
||||
default:
|
||||
err = createError(v, constraint, fmt.Sprintf("unknown type %v", v.Kind()))
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func validateStruct(x reflect.Value, v Constraint, name ...string) error {
|
||||
//Get field name from target name which is in format a.b.c
|
||||
s := strings.Split(v.Target, ".")
|
||||
f := x.FieldByName(s[len(s)-1])
|
||||
if isZero(f) {
|
||||
return createError(x, v, fmt.Sprintf("field %q doesn't exist", v.Target))
|
||||
}
|
||||
|
||||
return Validate([]Validation{
|
||||
{
|
||||
TargetValue: getInterfaceValue(f),
|
||||
Constraints: []Constraint{v},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func validatePtr(x reflect.Value, v Constraint) error {
|
||||
if v.Name == ReadOnly {
|
||||
if !x.IsNil() {
|
||||
return createError(x.Elem(), v, "readonly parameter; must send as nil or empty in request")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if x.IsNil() {
|
||||
return checkNil(x, v)
|
||||
}
|
||||
if v.Chain != nil {
|
||||
return Validate([]Validation{
|
||||
{
|
||||
TargetValue: getInterfaceValue(x.Elem()),
|
||||
Constraints: v.Chain,
|
||||
},
|
||||
})
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func validateInt(x reflect.Value, v Constraint) error {
|
||||
i := x.Int()
|
||||
r, ok := toInt64(v.Rule)
|
||||
if !ok {
|
||||
return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.Name, v.Rule))
|
||||
}
|
||||
switch v.Name {
|
||||
case MultipleOf:
|
||||
if i%r != 0 {
|
||||
return createError(x, v, fmt.Sprintf("value must be a multiple of %v", r))
|
||||
}
|
||||
case ExclusiveMinimum:
|
||||
if i <= r {
|
||||
return createError(x, v, fmt.Sprintf("value must be greater than %v", r))
|
||||
}
|
||||
case ExclusiveMaximum:
|
||||
if i >= r {
|
||||
return createError(x, v, fmt.Sprintf("value must be less than %v", r))
|
||||
}
|
||||
case InclusiveMinimum:
|
||||
if i < r {
|
||||
return createError(x, v, fmt.Sprintf("value must be greater than or equal to %v", r))
|
||||
}
|
||||
case InclusiveMaximum:
|
||||
if i > r {
|
||||
return createError(x, v, fmt.Sprintf("value must be less than or equal to %v", r))
|
||||
}
|
||||
default:
|
||||
return createError(x, v, fmt.Sprintf("constraint %v is not applicable for type integer", v.Name))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func validateFloat(x reflect.Value, v Constraint) error {
|
||||
f := x.Float()
|
||||
r, ok := v.Rule.(float64)
|
||||
if !ok {
|
||||
return createError(x, v, fmt.Sprintf("rule must be float value for %v constraint; got: %v", v.Name, v.Rule))
|
||||
}
|
||||
switch v.Name {
|
||||
case ExclusiveMinimum:
|
||||
if f <= r {
|
||||
return createError(x, v, fmt.Sprintf("value must be greater than %v", r))
|
||||
}
|
||||
case ExclusiveMaximum:
|
||||
if f >= r {
|
||||
return createError(x, v, fmt.Sprintf("value must be less than %v", r))
|
||||
}
|
||||
case InclusiveMinimum:
|
||||
if f < r {
|
||||
return createError(x, v, fmt.Sprintf("value must be greater than or equal to %v", r))
|
||||
}
|
||||
case InclusiveMaximum:
|
||||
if f > r {
|
||||
return createError(x, v, fmt.Sprintf("value must be less than or equal to %v", r))
|
||||
}
|
||||
default:
|
||||
return createError(x, v, fmt.Sprintf("constraint %s is not applicable for type float", v.Name))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func validateString(x reflect.Value, v Constraint) error {
|
||||
s := x.String()
|
||||
switch v.Name {
|
||||
case Empty:
|
||||
if len(s) == 0 {
|
||||
return checkEmpty(x, v)
|
||||
}
|
||||
case Pattern:
|
||||
reg, err := regexp.Compile(v.Rule.(string))
|
||||
if err != nil {
|
||||
return createError(x, v, err.Error())
|
||||
}
|
||||
if !reg.MatchString(s) {
|
||||
return createError(x, v, fmt.Sprintf("value doesn't match pattern %v", v.Rule))
|
||||
}
|
||||
case MaxLength:
|
||||
if _, ok := v.Rule.(int); !ok {
|
||||
return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.Name, v.Rule))
|
||||
}
|
||||
if len(s) > v.Rule.(int) {
|
||||
return createError(x, v, fmt.Sprintf("value length must be less than or equal to %v", v.Rule))
|
||||
}
|
||||
case MinLength:
|
||||
if _, ok := v.Rule.(int); !ok {
|
||||
return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.Name, v.Rule))
|
||||
}
|
||||
if len(s) < v.Rule.(int) {
|
||||
return createError(x, v, fmt.Sprintf("value length must be greater than or equal to %v", v.Rule))
|
||||
}
|
||||
case ReadOnly:
|
||||
if len(s) > 0 {
|
||||
return createError(reflect.ValueOf(s), v, "readonly parameter; must send as nil or empty in request")
|
||||
}
|
||||
default:
|
||||
return createError(x, v, fmt.Sprintf("constraint %s is not applicable to string type", v.Name))
|
||||
}
|
||||
|
||||
if v.Chain != nil {
|
||||
return Validate([]Validation{
|
||||
{
|
||||
TargetValue: getInterfaceValue(x),
|
||||
Constraints: v.Chain,
|
||||
},
|
||||
})
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func validateArrayMap(x reflect.Value, v Constraint) error {
|
||||
switch v.Name {
|
||||
case Null:
|
||||
if x.IsNil() {
|
||||
return checkNil(x, v)
|
||||
}
|
||||
case Empty:
|
||||
if x.IsNil() || x.Len() == 0 {
|
||||
return checkEmpty(x, v)
|
||||
}
|
||||
case MaxItems:
|
||||
if _, ok := v.Rule.(int); !ok {
|
||||
return createError(x, v, fmt.Sprintf("rule must be integer for %v constraint; got: %v", v.Name, v.Rule))
|
||||
}
|
||||
if x.Len() > v.Rule.(int) {
|
||||
return createError(x, v, fmt.Sprintf("maximum item limit is %v; got: %v", v.Rule, x.Len()))
|
||||
}
|
||||
case MinItems:
|
||||
if _, ok := v.Rule.(int); !ok {
|
||||
return createError(x, v, fmt.Sprintf("rule must be integer for %v constraint; got: %v", v.Name, v.Rule))
|
||||
}
|
||||
if x.Len() < v.Rule.(int) {
|
||||
return createError(x, v, fmt.Sprintf("minimum item limit is %v; got: %v", v.Rule, x.Len()))
|
||||
}
|
||||
case UniqueItems:
|
||||
if x.Kind() == reflect.Array || x.Kind() == reflect.Slice {
|
||||
if !checkForUniqueInArray(x) {
|
||||
return createError(x, v, fmt.Sprintf("all items in parameter %q must be unique; got:%v", v.Target, x))
|
||||
}
|
||||
} else if x.Kind() == reflect.Map {
|
||||
if !checkForUniqueInMap(x) {
|
||||
return createError(x, v, fmt.Sprintf("all items in parameter %q must be unique; got:%v", v.Target, x))
|
||||
}
|
||||
} else {
|
||||
return createError(x, v, fmt.Sprintf("type must be array, slice or map for constraint %v; got: %v", v.Name, x.Kind()))
|
||||
}
|
||||
case ReadOnly:
|
||||
if x.Len() != 0 {
|
||||
return createError(x, v, "readonly parameter; must send as nil or empty in request")
|
||||
}
|
||||
case Pattern:
|
||||
reg, err := regexp.Compile(v.Rule.(string))
|
||||
if err != nil {
|
||||
return createError(x, v, err.Error())
|
||||
}
|
||||
keys := x.MapKeys()
|
||||
for _, k := range keys {
|
||||
if !reg.MatchString(k.String()) {
|
||||
return createError(k, v, fmt.Sprintf("map key doesn't match pattern %v", v.Rule))
|
||||
}
|
||||
}
|
||||
default:
|
||||
return createError(x, v, fmt.Sprintf("constraint %v is not applicable to array, slice and map type", v.Name))
|
||||
}
|
||||
|
||||
if v.Chain != nil {
|
||||
return Validate([]Validation{
|
||||
{
|
||||
TargetValue: getInterfaceValue(x),
|
||||
Constraints: v.Chain,
|
||||
},
|
||||
})
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkNil(x reflect.Value, v Constraint) error {
|
||||
if _, ok := v.Rule.(bool); !ok {
|
||||
return createError(x, v, fmt.Sprintf("rule must be bool value for %v constraint; got: %v", v.Name, v.Rule))
|
||||
}
|
||||
if v.Rule.(bool) {
|
||||
return createError(x, v, "value can not be null; required parameter")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkEmpty(x reflect.Value, v Constraint) error {
|
||||
if _, ok := v.Rule.(bool); !ok {
|
||||
return createError(x, v, fmt.Sprintf("rule must be bool value for %v constraint; got: %v", v.Name, v.Rule))
|
||||
}
|
||||
|
||||
if v.Rule.(bool) {
|
||||
return createError(x, v, "value can not be null or empty; required parameter")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkForUniqueInArray(x reflect.Value) bool {
|
||||
if x == reflect.Zero(reflect.TypeOf(x)) || x.Len() == 0 {
|
||||
return false
|
||||
}
|
||||
arrOfInterface := make([]interface{}, x.Len())
|
||||
|
||||
for i := 0; i < x.Len(); i++ {
|
||||
arrOfInterface[i] = x.Index(i).Interface()
|
||||
}
|
||||
|
||||
m := make(map[interface{}]bool)
|
||||
for _, val := range arrOfInterface {
|
||||
if m[val] {
|
||||
return false
|
||||
}
|
||||
m[val] = true
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func checkForUniqueInMap(x reflect.Value) bool {
|
||||
if x == reflect.Zero(reflect.TypeOf(x)) || x.Len() == 0 {
|
||||
return false
|
||||
}
|
||||
mapOfInterface := make(map[interface{}]interface{}, x.Len())
|
||||
|
||||
keys := x.MapKeys()
|
||||
for _, k := range keys {
|
||||
mapOfInterface[k.Interface()] = x.MapIndex(k).Interface()
|
||||
}
|
||||
|
||||
m := make(map[interface{}]bool)
|
||||
for _, val := range mapOfInterface {
|
||||
if m[val] {
|
||||
return false
|
||||
}
|
||||
m[val] = true
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func getInterfaceValue(x reflect.Value) interface{} {
|
||||
if x.Kind() == reflect.Invalid {
|
||||
return nil
|
||||
}
|
||||
return x.Interface()
|
||||
}
|
||||
|
||||
func isZero(x interface{}) bool {
|
||||
return x == reflect.Zero(reflect.TypeOf(x)).Interface()
|
||||
}
|
||||
|
||||
func createError(x reflect.Value, v Constraint, err string) error {
|
||||
return fmt.Errorf("autorest/validation: validation failed: parameter=%s constraint=%s value=%#v details: %s",
|
||||
v.Target, v.Name, getInterfaceValue(x), err)
|
||||
}
|
||||
|
||||
func toInt64(v interface{}) (int64, bool) {
|
||||
if i64, ok := v.(int64); ok {
|
||||
return i64, true
|
||||
}
|
||||
// older generators emit max constants as int, so if int64 fails fall back to int
|
||||
if i32, ok := v.(int); ok {
|
||||
return int64(i32), true
|
||||
}
|
||||
return 0, false
|
||||
}
|
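Illustrative usage (not part of the diff): a minimal sketch of calling the vendored validation package added above, assuming only the exported Validate, Validation and Constraint API shown in this file.

package main

import (
    "fmt"

    "github.com/Azure/go-autorest/autorest/validation"
)

func main() {
    name := "ab"
    // Require name to be at least 5 characters long.
    err := validation.Validate([]validation.Validation{
        {
            TargetValue: name,
            Constraints: []validation.Constraint{
                {Target: "name", Name: validation.MinLength, Rule: 5, Chain: nil},
            },
        },
    })
    if err != nil {
        fmt.Println(err) // autorest/validation: validation failed: parameter=name constraint=MinLength ...
    }
}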
23
vendor/github.com/Azure/go-autorest/autorest/version.go
generated
vendored
@@ -14,7 +14,28 @@ package autorest
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
const number = "v12.4.1"
|
||||
|
||||
var (
|
||||
userAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s",
|
||||
runtime.Version(),
|
||||
runtime.GOARCH,
|
||||
runtime.GOOS,
|
||||
number,
|
||||
)
|
||||
)
|
||||
|
||||
// UserAgent returns a string containing the Go version, system architecture and OS, and the go-autorest version.
|
||||
func UserAgent() string {
|
||||
return userAgent
|
||||
}
|
||||
|
||||
// Version returns the semantic version (see http://semver.org).
|
||||
func Version() string {
|
||||
return "v10.5.0"
|
||||
return number
|
||||
}
|
||||
|
328
vendor/github.com/Azure/go-autorest/logger/logger.go
generated
vendored
Normal file
@@ -0,0 +1,328 @@
|
||||
package logger
|
||||
|
||||
// Copyright 2017 Microsoft Corporation
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// LevelType tells a logger the minimum level to log. When code reports a log entry,
|
||||
// the LogLevel indicates the level of the log entry. The logger only records entries
|
||||
// whose level is at least the level it was told to log. See the Log* constants.
|
||||
// For example, if a logger is configured with LogError, then LogError, LogPanic,
|
||||
// and LogFatal entries will be logged; lower level entries are ignored.
|
||||
type LevelType uint32
|
||||
|
||||
const (
|
||||
// LogNone tells a logger not to log any entries passed to it.
|
||||
LogNone LevelType = iota
|
||||
|
||||
// LogFatal tells a logger to log all LogFatal entries passed to it.
|
||||
LogFatal
|
||||
|
||||
// LogPanic tells a logger to log all LogPanic and LogFatal entries passed to it.
|
||||
LogPanic
|
||||
|
||||
// LogError tells a logger to log all LogError, LogPanic and LogFatal entries passed to it.
|
||||
LogError
|
||||
|
||||
// LogWarning tells a logger to log all LogWarning, LogError, LogPanic and LogFatal entries passed to it.
|
||||
LogWarning
|
||||
|
||||
// LogInfo tells a logger to log all LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it.
|
||||
LogInfo
|
||||
|
||||
// LogDebug tells a logger to log all LogDebug, LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it.
|
||||
LogDebug
|
||||
)
|
||||
|
||||
const (
|
||||
logNone = "NONE"
|
||||
logFatal = "FATAL"
|
||||
logPanic = "PANIC"
|
||||
logError = "ERROR"
|
||||
logWarning = "WARNING"
|
||||
logInfo = "INFO"
|
||||
logDebug = "DEBUG"
|
||||
logUnknown = "UNKNOWN"
|
||||
)
|
||||
|
||||
// ParseLevel converts the specified string into the corresponding LevelType.
|
||||
func ParseLevel(s string) (lt LevelType, err error) {
|
||||
switch strings.ToUpper(s) {
|
||||
case logFatal:
|
||||
lt = LogFatal
|
||||
case logPanic:
|
||||
lt = LogPanic
|
||||
case logError:
|
||||
lt = LogError
|
||||
case logWarning:
|
||||
lt = LogWarning
|
||||
case logInfo:
|
||||
lt = LogInfo
|
||||
case logDebug:
|
||||
lt = LogDebug
|
||||
default:
|
||||
err = fmt.Errorf("bad log level '%s'", s)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// String implements the stringer interface for LevelType.
|
||||
func (lt LevelType) String() string {
|
||||
switch lt {
|
||||
case LogNone:
|
||||
return logNone
|
||||
case LogFatal:
|
||||
return logFatal
|
||||
case LogPanic:
|
||||
return logPanic
|
||||
case LogError:
|
||||
return logError
|
||||
case LogWarning:
|
||||
return logWarning
|
||||
case LogInfo:
|
||||
return logInfo
|
||||
case LogDebug:
|
||||
return logDebug
|
||||
default:
|
||||
return logUnknown
|
||||
}
|
||||
}
|
||||
|
||||
// Filter defines functions for filtering HTTP request/response content.
|
||||
type Filter struct {
|
||||
// URL returns a potentially modified string representation of a request URL.
|
||||
URL func(u *url.URL) string
|
||||
|
||||
// Header returns a potentially modified set of values for the specified key.
|
||||
// To completely exclude the header key/values return false.
|
||||
Header func(key string, val []string) (bool, []string)
|
||||
|
||||
// Body returns a potentially modified request/response body.
|
||||
Body func(b []byte) []byte
|
||||
}
|
||||
|
||||
func (f Filter) processURL(u *url.URL) string {
|
||||
if f.URL == nil {
|
||||
return u.String()
|
||||
}
|
||||
return f.URL(u)
|
||||
}
|
||||
|
||||
func (f Filter) processHeader(k string, val []string) (bool, []string) {
|
||||
if f.Header == nil {
|
||||
return true, val
|
||||
}
|
||||
return f.Header(k, val)
|
||||
}
|
||||
|
||||
func (f Filter) processBody(b []byte) []byte {
|
||||
if f.Body == nil {
|
||||
return b
|
||||
}
|
||||
return f.Body(b)
|
||||
}
|
||||
|
||||
// Writer defines methods for writing to a logging facility.
|
||||
type Writer interface {
|
||||
// Writeln writes the specified message with the standard log entry header and new-line character.
|
||||
Writeln(level LevelType, message string)
|
||||
|
||||
// Writef writes the specified format specifier with the standard log entry header and no new-line character.
|
||||
Writef(level LevelType, format string, a ...interface{})
|
||||
|
||||
// WriteRequest writes the specified HTTP request to the logger if the log level is greater than
|
||||
// or equal to LogInfo. The request body, if set, is logged at level LogDebug or higher.
|
||||
// Custom filters can be specified to exclude URL, header, and/or body content from the log.
|
||||
// By default no request content is excluded.
|
||||
WriteRequest(req *http.Request, filter Filter)
|
||||
|
||||
// WriteResponse writes the specified HTTP response to the logger if the log level is greater than
|
||||
// or equal to LogInfo. The response body, if set, is logged at level LogDebug or higher.
|
||||
// Custom filters can be specified to exclude URL, header, and/or body content from the log.
|
||||
// By default no response content is excluded.
|
||||
WriteResponse(resp *http.Response, filter Filter)
|
||||
}
|
||||
|
||||
// Instance is the default log writer initialized during package init.
|
||||
// This can be replaced with a custom implementation as required.
|
||||
var Instance Writer
|
||||
|
||||
// default log level
|
||||
var logLevel = LogNone
|
||||
|
||||
// Level returns the value specified in AZURE_GO_AUTOREST_LOG_LEVEL.
|
||||
// If no value was specified the default value is LogNone.
|
||||
// Custom loggers can call this to retrieve the configured log level.
|
||||
func Level() LevelType {
|
||||
return logLevel
|
||||
}
|
||||
|
||||
func init() {
|
||||
// separated for testing purposes
|
||||
initDefaultLogger()
|
||||
}
|
||||
|
||||
func initDefaultLogger() {
|
||||
// init with nilLogger so callers don't have to do a nil check on Default
|
||||
Instance = nilLogger{}
|
||||
llStr := strings.ToLower(os.Getenv("AZURE_GO_SDK_LOG_LEVEL"))
|
||||
if llStr == "" {
|
||||
return
|
||||
}
|
||||
var err error
|
||||
logLevel, err = ParseLevel(llStr)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "go-autorest: failed to parse log level: %s\n", err.Error())
|
||||
return
|
||||
}
|
||||
if logLevel == LogNone {
|
||||
return
|
||||
}
|
||||
// default to stderr
|
||||
dest := os.Stderr
|
||||
lfStr := os.Getenv("AZURE_GO_SDK_LOG_FILE")
|
||||
if strings.EqualFold(lfStr, "stdout") {
|
||||
dest = os.Stdout
|
||||
} else if lfStr != "" {
|
||||
lf, err := os.Create(lfStr)
|
||||
if err == nil {
|
||||
dest = lf
|
||||
} else {
|
||||
fmt.Fprintf(os.Stderr, "go-autorest: failed to create log file, using stderr: %s\n", err.Error())
|
||||
}
|
||||
}
|
||||
Instance = fileLogger{
|
||||
logLevel: logLevel,
|
||||
mu: &sync.Mutex{},
|
||||
logFile: dest,
|
||||
}
|
||||
}
|
||||
|
||||
// the nil logger does nothing
|
||||
type nilLogger struct{}
|
||||
|
||||
func (nilLogger) Writeln(LevelType, string) {}
|
||||
|
||||
func (nilLogger) Writef(LevelType, string, ...interface{}) {}
|
||||
|
||||
func (nilLogger) WriteRequest(*http.Request, Filter) {}
|
||||
|
||||
func (nilLogger) WriteResponse(*http.Response, Filter) {}
|
||||
|
||||
// A File is used instead of a Logger so the stream can be flushed after every write.
|
||||
type fileLogger struct {
|
||||
logLevel LevelType
|
||||
mu *sync.Mutex // for synchronizing writes to logFile
|
||||
logFile *os.File
|
||||
}
|
||||
|
||||
func (fl fileLogger) Writeln(level LevelType, message string) {
|
||||
fl.Writef(level, "%s\n", message)
|
||||
}
|
||||
|
||||
func (fl fileLogger) Writef(level LevelType, format string, a ...interface{}) {
|
||||
if fl.logLevel >= level {
|
||||
fl.mu.Lock()
|
||||
defer fl.mu.Unlock()
|
||||
fmt.Fprintf(fl.logFile, "%s %s", entryHeader(level), fmt.Sprintf(format, a...))
|
||||
fl.logFile.Sync()
|
||||
}
|
||||
}
|
||||
|
||||
func (fl fileLogger) WriteRequest(req *http.Request, filter Filter) {
|
||||
if req == nil || fl.logLevel < LogInfo {
|
||||
return
|
||||
}
|
||||
b := &bytes.Buffer{}
|
||||
fmt.Fprintf(b, "%s REQUEST: %s %s\n", entryHeader(LogInfo), req.Method, filter.processURL(req.URL))
|
||||
// dump headers
|
||||
for k, v := range req.Header {
|
||||
if ok, mv := filter.processHeader(k, v); ok {
|
||||
fmt.Fprintf(b, "%s: %s\n", k, strings.Join(mv, ","))
|
||||
}
|
||||
}
|
||||
if fl.shouldLogBody(req.Header, req.Body) {
|
||||
// dump body
|
||||
body, err := ioutil.ReadAll(req.Body)
|
||||
if err == nil {
|
||||
fmt.Fprintln(b, string(filter.processBody(body)))
|
||||
if nc, ok := req.Body.(io.Seeker); ok {
|
||||
// rewind to the beginning
|
||||
nc.Seek(0, io.SeekStart)
|
||||
} else {
|
||||
// recreate the body
|
||||
req.Body = ioutil.NopCloser(bytes.NewReader(body))
|
||||
}
|
||||
} else {
|
||||
fmt.Fprintf(b, "failed to read body: %v\n", err)
|
||||
}
|
||||
}
|
||||
fl.mu.Lock()
|
||||
defer fl.mu.Unlock()
|
||||
fmt.Fprint(fl.logFile, b.String())
|
||||
fl.logFile.Sync()
|
||||
}
|
||||
|
||||
func (fl fileLogger) WriteResponse(resp *http.Response, filter Filter) {
|
||||
if resp == nil || fl.logLevel < LogInfo {
|
||||
return
|
||||
}
|
||||
b := &bytes.Buffer{}
|
||||
fmt.Fprintf(b, "%s RESPONSE: %d %s\n", entryHeader(LogInfo), resp.StatusCode, filter.processURL(resp.Request.URL))
|
||||
// dump headers
|
||||
for k, v := range resp.Header {
|
||||
if ok, mv := filter.processHeader(k, v); ok {
|
||||
fmt.Fprintf(b, "%s: %s\n", k, strings.Join(mv, ","))
|
||||
}
|
||||
}
|
||||
if fl.shouldLogBody(resp.Header, resp.Body) {
|
||||
// dump body
|
||||
defer resp.Body.Close()
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err == nil {
|
||||
fmt.Fprintln(b, string(filter.processBody(body)))
|
||||
resp.Body = ioutil.NopCloser(bytes.NewReader(body))
|
||||
} else {
|
||||
fmt.Fprintf(b, "failed to read body: %v\n", err)
|
||||
}
|
||||
}
|
||||
fl.mu.Lock()
|
||||
defer fl.mu.Unlock()
|
||||
fmt.Fprint(fl.logFile, b.String())
|
||||
fl.logFile.Sync()
|
||||
}
|
||||
|
||||
// returns true if the provided body should be included in the log
|
||||
func (fl fileLogger) shouldLogBody(header http.Header, body io.ReadCloser) bool {
|
||||
ct := header.Get("Content-Type")
|
||||
return fl.logLevel >= LogDebug && body != nil && !strings.Contains(ct, "application/octet-stream")
|
||||
}
|
||||
|
||||
// creates standard header for log entries, it contains a timestamp and the log level
|
||||
func entryHeader(level LevelType) string {
|
||||
// this format provides a fixed number of digits so the size of the timestamp is constant
|
||||
return fmt.Sprintf("(%s) %s:", time.Now().Format("2006-01-02T15:04:05.0000000Z07:00"), level.String())
|
||||
}
|
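Illustrative usage (not part of the diff): a hedged sketch of the logger package added above. The log level and optional log file are read from AZURE_GO_SDK_LOG_LEVEL and AZURE_GO_SDK_LOG_FILE at init, as shown in initDefaultLogger.

package main

import (
    "net/http"

    "github.com/Azure/go-autorest/logger"
)

func main() {
    // With AZURE_GO_SDK_LOG_LEVEL unset, Instance is the no-op nilLogger.
    logger.Instance.Writef(logger.LogInfo, "current level: %s\n", logger.Level())

    req, _ := http.NewRequest(http.MethodGet, "https://example.com", nil)
    // Log the request without filtering URL, headers or body.
    logger.Instance.WriteRequest(req, logger.Filter{})
}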
195
vendor/github.com/Azure/go-autorest/tracing/tracing.go
generated
vendored
Normal file
@@ -0,0 +1,195 @@
|
||||
package tracing
|
||||
|
||||
// Copyright 2018 Microsoft Corporation
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
|
||||
"contrib.go.opencensus.io/exporter/ocagent"
|
||||
"go.opencensus.io/plugin/ochttp"
|
||||
"go.opencensus.io/plugin/ochttp/propagation/tracecontext"
|
||||
"go.opencensus.io/stats/view"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
var (
|
||||
// Transport is the default tracing RoundTripper. The custom options setter will control
|
||||
// if traces are being emitted or not.
|
||||
Transport = NewTransport()
|
||||
|
||||
// enabled is the flag for marking if tracing is enabled.
|
||||
enabled = false
|
||||
|
||||
// Sampler is the tracing sampler. If tracing is disabled it will never sample. Otherwise
|
||||
// it will be using the parent sampler or the default.
|
||||
sampler = trace.NeverSample()
|
||||
|
||||
// Views for metric instrumentation.
|
||||
views = map[string]*view.View{}
|
||||
|
||||
// the trace exporter
|
||||
traceExporter trace.Exporter
|
||||
)
|
||||
|
||||
func init() {
|
||||
enableFromEnv()
|
||||
}
|
||||
|
||||
func enableFromEnv() {
|
||||
_, ok := os.LookupEnv("AZURE_SDK_TRACING_ENABLED")
|
||||
_, legacyOk := os.LookupEnv("AZURE_SDK_TRACING_ENABELD")
|
||||
if ok || legacyOk {
|
||||
agentEndpoint, ok := os.LookupEnv("OCAGENT_TRACE_EXPORTER_ENDPOINT")
|
||||
|
||||
if ok {
|
||||
EnableWithAIForwarding(agentEndpoint)
|
||||
} else {
|
||||
Enable()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// NewTransport returns a new instance of a tracing-aware RoundTripper.
|
||||
func NewTransport() *ochttp.Transport {
|
||||
return &ochttp.Transport{
|
||||
Propagation: &tracecontext.HTTPFormat{},
|
||||
GetStartOptions: getStartOptions,
|
||||
}
|
||||
}
|
||||
|
||||
// IsEnabled returns true if monitoring is enabled for the sdk.
|
||||
func IsEnabled() bool {
|
||||
return enabled
|
||||
}
|
||||
|
||||
// Enable will start instrumentation for metrics and traces.
|
||||
func Enable() error {
|
||||
enabled = true
|
||||
sampler = nil
|
||||
|
||||
err := initStats()
|
||||
return err
|
||||
}
|
||||
|
||||
// Disable will disable instrumentation for metrics and traces.
|
||||
func Disable() {
|
||||
disableStats()
|
||||
sampler = trace.NeverSample()
|
||||
if traceExporter != nil {
|
||||
trace.UnregisterExporter(traceExporter)
|
||||
}
|
||||
enabled = false
|
||||
}
|
||||
|
||||
// EnableWithAIForwarding will start instrumentation and will connect to app insights forwarder
|
||||
// exporter making the metrics and traces available in app insights.
|
||||
func EnableWithAIForwarding(agentEndpoint string) (err error) {
|
||||
err = Enable()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
traceExporter, err := ocagent.NewExporter(ocagent.WithInsecure(), ocagent.WithAddress(agentEndpoint))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
trace.RegisterExporter(traceExporter)
|
||||
return
|
||||
}
|
||||
|
||||
// getStartOptions is the custom options setter for the ochttp package.
|
||||
func getStartOptions(*http.Request) trace.StartOptions {
|
||||
return trace.StartOptions{
|
||||
Sampler: sampler,
|
||||
}
|
||||
}
|
||||
|
||||
// initStats registers the views for the http metrics
|
||||
func initStats() (err error) {
|
||||
clientViews := []*view.View{
|
||||
ochttp.ClientCompletedCount,
|
||||
ochttp.ClientRoundtripLatencyDistribution,
|
||||
ochttp.ClientReceivedBytesDistribution,
|
||||
ochttp.ClientSentBytesDistribution,
|
||||
}
|
||||
for _, cv := range clientViews {
|
||||
vn := fmt.Sprintf("Azure/go-autorest/tracing-%s", cv.Name)
|
||||
views[vn] = cv.WithName(vn)
|
||||
err = view.Register(views[vn])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// disableStats will unregister the previously registered metrics
|
||||
func disableStats() {
|
||||
for _, v := range views {
|
||||
view.Unregister(v)
|
||||
}
|
||||
}
|
||||
|
||||
// StartSpan starts a trace span
|
||||
func StartSpan(ctx context.Context, name string) context.Context {
|
||||
ctx, _ = trace.StartSpan(ctx, name, trace.WithSampler(sampler))
|
||||
return ctx
|
||||
}
|
||||
|
||||
// EndSpan ends a previously started span stored in the context
|
||||
func EndSpan(ctx context.Context, httpStatusCode int, err error) {
|
||||
span := trace.FromContext(ctx)
|
||||
|
||||
if span == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
span.SetStatus(trace.Status{Message: err.Error(), Code: toTraceStatusCode(httpStatusCode)})
|
||||
}
|
||||
span.End()
|
||||
}
|
||||
|
||||
// toTraceStatusCode converts HTTP Codes to OpenCensus codes as defined
|
||||
// at https://github.com/census-instrumentation/opencensus-specs/blob/master/trace/HTTP.md#status
|
||||
func toTraceStatusCode(httpStatusCode int) int32 {
|
||||
switch {
|
||||
case http.StatusOK <= httpStatusCode && httpStatusCode < http.StatusBadRequest:
|
||||
return trace.StatusCodeOK
|
||||
case httpStatusCode == http.StatusBadRequest:
|
||||
return trace.StatusCodeInvalidArgument
|
||||
case httpStatusCode == http.StatusUnauthorized: // 401 is actually unauthenticated.
|
||||
return trace.StatusCodeUnauthenticated
|
||||
case httpStatusCode == http.StatusForbidden:
|
||||
return trace.StatusCodePermissionDenied
|
||||
case httpStatusCode == http.StatusNotFound:
|
||||
return trace.StatusCodeNotFound
|
||||
case httpStatusCode == http.StatusTooManyRequests:
|
||||
return trace.StatusCodeResourceExhausted
|
||||
case httpStatusCode == 499:
|
||||
return trace.StatusCodeCancelled
|
||||
case httpStatusCode == http.StatusNotImplemented:
|
||||
return trace.StatusCodeUnimplemented
|
||||
case httpStatusCode == http.StatusServiceUnavailable:
|
||||
return trace.StatusCodeUnavailable
|
||||
case httpStatusCode == http.StatusGatewayTimeout:
|
||||
return trace.StatusCodeDeadlineExceeded
|
||||
default:
|
||||
return trace.StatusCodeUnknown
|
||||
}
|
||||
}
|
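Illustrative usage (not part of the diff): a sketch of enabling the tracing package added above explicitly; it can also be switched on by setting AZURE_SDK_TRACING_ENABLED before the process starts.

package main

import (
    "context"
    "net/http"

    "github.com/Azure/go-autorest/tracing"
)

func main() {
    if err := tracing.Enable(); err != nil {
        panic(err)
    }
    defer tracing.Disable()

    // Route requests through the tracing-aware RoundTripper.
    client := &http.Client{Transport: tracing.Transport}

    ctx := tracing.StartSpan(context.Background(), "example-call")
    req, _ := http.NewRequest(http.MethodGet, "https://example.com", nil)
    resp, err := client.Do(req.WithContext(ctx))
    status := 0
    if resp != nil {
        status = resp.StatusCode
        resp.Body.Close()
    }
    tracing.EndSpan(ctx, status, err)
}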
71
vendor/github.com/JamesClonk/vultr/lib/account_info.go
generated
vendored
@@ -1,71 +0,0 @@
|
||||
package lib
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// AccountInfo of Vultr account
|
||||
type AccountInfo struct {
|
||||
Balance float64 `json:"balance"`
|
||||
PendingCharges float64 `json:"pending_charges"`
|
||||
LastPaymentDate string `json:"last_payment_date"`
|
||||
LastPaymentAmount float64 `json:"last_payment_amount"`
|
||||
}
|
||||
|
||||
// GetAccountInfo retrieves the Vultr account information about current balance, pending charges, etc.
|
||||
func (c *Client) GetAccountInfo() (info AccountInfo, err error) {
|
||||
if err := c.get(`account/info`, &info); err != nil {
|
||||
return AccountInfo{}, err
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements json.Unmarshaller on AccountInfo.
|
||||
// This is needed because the Vultr API is inconsistent in its JSON responses for account info.
|
||||
// Some fields can change type, from JSON number to JSON string and vice-versa.
|
||||
func (a *AccountInfo) UnmarshalJSON(data []byte) (err error) {
|
||||
if a == nil {
|
||||
*a = AccountInfo{}
|
||||
}
|
||||
|
||||
var fields map[string]interface{}
|
||||
if err := json.Unmarshal(data, &fields); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
value := fmt.Sprintf("%v", fields["balance"])
|
||||
if len(value) == 0 || value == "<nil>" {
|
||||
value = "0"
|
||||
}
|
||||
b, err := strconv.ParseFloat(value, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
a.Balance = b
|
||||
|
||||
value = fmt.Sprintf("%v", fields["pending_charges"])
|
||||
if len(value) == 0 || value == "<nil>" {
|
||||
value = "0"
|
||||
}
|
||||
pc, err := strconv.ParseFloat(value, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
a.PendingCharges = pc
|
||||
|
||||
value = fmt.Sprintf("%v", fields["last_payment_amount"])
|
||||
if len(value) == 0 || value == "<nil>" {
|
||||
value = "0"
|
||||
}
|
||||
lpa, err := strconv.ParseFloat(value, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
a.LastPaymentAmount = lpa
|
||||
|
||||
a.LastPaymentDate = fmt.Sprintf("%v", fields["last_payment_date"])
|
||||
|
||||
return
|
||||
}
|
38
vendor/github.com/JamesClonk/vultr/lib/applications.go
generated
vendored
@@ -1,38 +0,0 @@
|
||||
package lib
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Application on Vultr
|
||||
type Application struct {
|
||||
ID string `json:"APPID"`
|
||||
Name string `json:"name"`
|
||||
ShortName string `json:"short_name"`
|
||||
DeployName string `json:"deploy_name"`
|
||||
Surcharge float64 `json:"surcharge"`
|
||||
}
|
||||
|
||||
type applications []Application
|
||||
|
||||
func (s applications) Len() int { return len(s) }
|
||||
func (s applications) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
func (s applications) Less(i, j int) bool {
|
||||
return strings.ToLower(s[i].Name) < strings.ToLower(s[j].Name)
|
||||
}
|
||||
|
||||
// GetApplications returns a list of all available applications on Vultr
|
||||
func (c *Client) GetApplications() ([]Application, error) {
|
||||
var appMap map[string]Application
|
||||
if err := c.get(`app/list`, &appMap); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var appList []Application
|
||||
for _, app := range appMap {
|
||||
appList = append(appList, app)
|
||||
}
|
||||
sort.Sort(applications(appList))
|
||||
return appList, nil
|
||||
}
|
210
vendor/github.com/JamesClonk/vultr/lib/block_storage.go
generated
vendored
@@ -1,210 +0,0 @@
|
||||
package lib
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// BlockStorage on Vultr account
|
||||
type BlockStorage struct {
|
||||
ID string `json:"SUBID,string"`
|
||||
Name string `json:"label"`
|
||||
RegionID int `json:"DCID,string"`
|
||||
SizeGB int `json:"size_gb,string"`
|
||||
Created string `json:"date_created"`
|
||||
Cost string `json:"cost_per_month"`
|
||||
Status string `json:"status"`
|
||||
AttachedTo string `json:"attached_to_SUBID"`
|
||||
}
|
||||
|
||||
type blockstorages []BlockStorage
|
||||
|
||||
func (b blockstorages) Len() int { return len(b) }
|
||||
func (b blockstorages) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
|
||||
func (b blockstorages) Less(i, j int) bool {
|
||||
// sort order: name, size, status
|
||||
if strings.ToLower(b[i].Name) < strings.ToLower(b[j].Name) {
|
||||
return true
|
||||
} else if strings.ToLower(b[i].Name) > strings.ToLower(b[j].Name) {
|
||||
return false
|
||||
}
|
||||
if b[i].SizeGB < b[j].SizeGB {
|
||||
return true
|
||||
} else if b[i].SizeGB > b[j].SizeGB {
|
||||
return false
|
||||
}
|
||||
return b[i].Status < b[j].Status
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements json.Unmarshaller on BlockStorage.
|
||||
// This is needed because the Vultr API is inconsistent in its JSON responses.
|
||||
// Some fields can change type, from JSON number to JSON string and vice-versa.
|
||||
func (b *BlockStorage) UnmarshalJSON(data []byte) (err error) {
|
||||
if b == nil {
|
||||
*b = BlockStorage{}
|
||||
}
|
||||
|
||||
var fields map[string]interface{}
|
||||
if err := json.Unmarshal(data, &fields); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
value := fmt.Sprintf("%v", fields["SUBID"])
|
||||
if len(value) == 0 || value == "<nil>" || value == "0" {
|
||||
b.ID = ""
|
||||
} else {
|
||||
id, err := strconv.ParseFloat(value, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b.ID = strconv.FormatFloat(id, 'f', -1, 64)
|
||||
}
|
||||
|
||||
value = fmt.Sprintf("%v", fields["DCID"])
|
||||
if len(value) == 0 || value == "<nil>" {
|
||||
value = "0"
|
||||
}
|
||||
region, err := strconv.ParseInt(value, 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b.RegionID = int(region)
|
||||
|
||||
value = fmt.Sprintf("%v", fields["size_gb"])
|
||||
if len(value) == 0 || value == "<nil>" {
|
||||
value = "0"
|
||||
}
|
||||
size, err := strconv.ParseInt(value, 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b.SizeGB = int(size)
|
||||
|
||||
value = fmt.Sprintf("%v", fields["attached_to_SUBID"])
|
||||
if len(value) == 0 || value == "<nil>" || value == "0" {
|
||||
b.AttachedTo = ""
|
||||
} else {
|
||||
attached, err := strconv.ParseFloat(value, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b.AttachedTo = strconv.FormatFloat(attached, 'f', -1, 64)
|
||||
}
|
||||
|
||||
b.Name = fmt.Sprintf("%v", fields["label"])
|
||||
b.Created = fmt.Sprintf("%v", fields["date_created"])
|
||||
b.Status = fmt.Sprintf("%v", fields["status"])
|
||||
b.Cost = fmt.Sprintf("%v", fields["cost_per_month"])
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// GetBlockStorages returns a list of all active block storages on Vultr account
|
||||
func (c *Client) GetBlockStorages() (storages []BlockStorage, err error) {
|
||||
if err := c.get(`block/list`, &storages); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sort.Sort(blockstorages(storages))
|
||||
return storages, nil
|
||||
}
|
||||
|
||||
// GetBlockStorage returns block storage with given ID
|
||||
func (c *Client) GetBlockStorage(id string) (BlockStorage, error) {
|
||||
storages, err := c.GetBlockStorages()
|
||||
if err != nil {
|
||||
return BlockStorage{}, err
|
||||
}
|
||||
|
||||
for _, s := range storages {
|
||||
if s.ID == id {
|
||||
return s, nil
|
||||
}
|
||||
}
|
||||
return BlockStorage{}, fmt.Errorf("BlockStorage with ID %v not found", id)
|
||||
}
|
||||
|
||||
// CreateBlockStorage creates a new block storage on Vultr account
|
||||
func (c *Client) CreateBlockStorage(name string, regionID, size int) (BlockStorage, error) {
|
||||
values := url.Values{
|
||||
"label": {name},
|
||||
"DCID": {fmt.Sprintf("%v", regionID)},
|
||||
"size_gb": {fmt.Sprintf("%v", size)},
|
||||
}
|
||||
|
||||
var storage BlockStorage
|
||||
if err := c.post(`block/create`, values, &storage); err != nil {
|
||||
return BlockStorage{}, err
|
||||
}
|
||||
storage.RegionID = regionID
|
||||
storage.Name = name
|
||||
storage.SizeGB = size
|
||||
|
||||
return storage, nil
|
||||
}
|
||||
|
||||
// ResizeBlockStorage resizes an existing block storage
|
||||
func (c *Client) ResizeBlockStorage(id string, size int) error {
|
||||
values := url.Values{
|
||||
"SUBID": {id},
|
||||
"size_gb": {fmt.Sprintf("%v", size)},
|
||||
}
|
||||
|
||||
if err := c.post(`block/resize`, values, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// LabelBlockStorage changes the label on an existing block storage
|
||||
func (c *Client) LabelBlockStorage(id, name string) error {
|
||||
values := url.Values{
|
||||
"SUBID": {id},
|
||||
"label": {name},
|
||||
}
|
||||
|
||||
if err := c.post(`block/label_set`, values, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// AttachBlockStorage attaches block storage to an existing virtual machine
|
||||
func (c *Client) AttachBlockStorage(id, serverID string) error {
|
||||
values := url.Values{
|
||||
"SUBID": {id},
|
||||
"attach_to_SUBID": {serverID},
|
||||
}
|
||||
|
||||
if err := c.post(`block/attach`, values, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DetachBlockStorage detaches block storage from virtual machine
|
||||
func (c *Client) DetachBlockStorage(id string) error {
|
||||
values := url.Values{
|
||||
"SUBID": {id},
|
||||
}
|
||||
|
||||
if err := c.post(`block/detach`, values, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteBlockStorage deletes an existing block storage
|
||||
func (c *Client) DeleteBlockStorage(id string) error {
|
||||
values := url.Values{
|
||||
"SUBID": {id},
|
||||
}
|
||||
|
||||
if err := c.post(`block/delete`, values, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
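Illustrative usage (not part of the diff): a sketch of the block-storage API being removed above; the server SUBID below is a made-up example value.

package main

import (
    "fmt"

    "github.com/JamesClonk/vultr/lib"
)

func main() {
    client := lib.NewClient("my-api-key", nil)

    // Create a 50 GB volume in region 1, then attach it to an existing server.
    storage, err := client.CreateBlockStorage("data-volume", 1, 50)
    if err != nil {
        panic(err)
    }
    if err := client.AttachBlockStorage(storage.ID, "1312965"); err != nil {
        panic(err)
    }
    fmt.Println("created and attached block storage", storage.ID)
}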
249
vendor/github.com/JamesClonk/vultr/lib/client.go
generated
vendored
@@ -1,249 +0,0 @@
|
||||
package lib
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/juju/ratelimit"
|
||||
)
|
||||
|
||||
const (
|
||||
// Version of this library
|
||||
Version = "1.13.0"
|
||||
|
||||
// APIVersion of Vultr
|
||||
APIVersion = "v1"
|
||||
|
||||
// DefaultEndpoint to be used
|
||||
DefaultEndpoint = "https://api.vultr.com/"
|
||||
|
||||
mediaType = "application/json"
|
||||
)
|
||||
|
||||
// retryableStatusCodes are API response status codes that indicate that
|
||||
// the failed request can be retried without further actions.
|
||||
var retryableStatusCodes = map[int]struct{}{
|
||||
503: {}, // Rate limit hit
|
||||
500: {}, // Internal server error. Try again at a later time.
|
||||
}
|
||||
|
||||
// Client represents the Vultr API client
|
||||
type Client struct {
|
||||
// HTTP client for communication with the Vultr API
|
||||
client *http.Client
|
||||
|
||||
// User agent for HTTP client
|
||||
UserAgent string
|
||||
|
||||
// Endpoint URL for API requests
|
||||
Endpoint *url.URL
|
||||
|
||||
// API key for accessing the Vultr API
|
||||
APIKey string
|
||||
|
||||
// Max. number of request attempts
|
||||
MaxAttempts int
|
||||
|
||||
// Throttling struct
|
||||
bucket *ratelimit.Bucket
|
||||
}
|
||||
|
||||
// Options represents optional settings and flags that can be passed to NewClient
|
||||
type Options struct {
|
||||
// HTTP client for communication with the Vultr API
|
||||
HTTPClient *http.Client
|
||||
|
||||
// User agent for HTTP client
|
||||
UserAgent string
|
||||
|
||||
// Endpoint URL for API requests
|
||||
Endpoint string
|
||||
|
||||
// API rate limitation, calls per duration
|
||||
RateLimitation time.Duration
|
||||
|
||||
// Max. number of times to retry API calls
|
||||
MaxRetries int
|
||||
}
|
||||
|
||||
// NewClient creates new Vultr API client. Options are optional and can be nil.
|
||||
func NewClient(apiKey string, options *Options) *Client {
|
||||
userAgent := "vultr-go/" + Version
|
||||
transport := &http.Transport{
|
||||
TLSNextProto: make(map[string]func(string, *tls.Conn) http.RoundTripper),
|
||||
}
|
||||
client := http.DefaultClient
|
||||
client.Transport = transport
|
||||
endpoint, _ := url.Parse(DefaultEndpoint)
|
||||
rate := 505 * time.Millisecond
|
||||
attempts := 1
|
||||
|
||||
if options != nil {
|
||||
if options.HTTPClient != nil {
|
||||
client = options.HTTPClient
|
||||
}
|
||||
if options.UserAgent != "" {
|
||||
userAgent = options.UserAgent
|
||||
}
|
||||
if options.Endpoint != "" {
|
||||
endpoint, _ = url.Parse(options.Endpoint)
|
||||
}
|
||||
if options.RateLimitation != 0 {
|
||||
rate = options.RateLimitation
|
||||
}
|
||||
if options.MaxRetries != 0 {
|
||||
attempts = options.MaxRetries + 1
|
||||
}
|
||||
}
|
||||
|
||||
return &Client{
|
||||
UserAgent: userAgent,
|
||||
client: client,
|
||||
Endpoint: endpoint,
|
||||
APIKey: apiKey,
|
||||
MaxAttempts: attempts,
|
||||
bucket: ratelimit.NewBucket(rate, 1),
|
||||
}
|
||||
}
|
||||
|
||||
func apiPath(path string) string {
|
||||
return fmt.Sprintf("/%s/%s", APIVersion, path)
|
||||
}
|
||||
|
||||
func apiKeyPath(path, apiKey string) string {
|
||||
if strings.Contains(path, "?") {
|
||||
return path + "&api_key=" + apiKey
|
||||
}
|
||||
return path + "?api_key=" + apiKey
|
||||
}
|
||||
|
||||
func (c *Client) get(path string, data interface{}) error {
|
||||
req, err := c.newRequest("GET", apiPath(path), nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return c.do(req, data)
|
||||
}
|
||||
|
||||
func (c *Client) post(path string, values url.Values, data interface{}) error {
|
||||
req, err := c.newRequest("POST", apiPath(path), strings.NewReader(values.Encode()))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return c.do(req, data)
|
||||
}
|
||||
|
||||
func (c *Client) newRequest(method string, path string, body io.Reader) (*http.Request, error) {
|
||||
relPath, err := url.Parse(apiKeyPath(path, c.APIKey))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
url := c.Endpoint.ResolveReference(relPath)
|
||||
|
||||
req, err := http.NewRequest(method, url.String(), body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
req.Header.Add("User-Agent", c.UserAgent)
|
||||
req.Header.Add("Accept", mediaType)
|
||||
|
||||
if req.Method == "POST" {
|
||||
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
|
||||
}
|
||||
return req, nil
|
||||
}
|
||||
|
||||
func (c *Client) do(req *http.Request, data interface{}) error {
|
||||
// Throttle http requests to avoid hitting Vultr's API rate-limit
|
||||
c.bucket.Wait(1)
|
||||
|
||||
// Request body gets drained on each read so we
|
||||
// need to save its content for retrying requests
|
||||
var err error
|
||||
var requestBody []byte
|
||||
if req.Body != nil {
|
||||
requestBody, err = ioutil.ReadAll(req.Body)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error reading request body: %v", err)
|
||||
}
|
||||
req.Body.Close()
|
||||
}
|
||||
|
||||
var apiError error
|
||||
for tryCount := 1; tryCount <= c.MaxAttempts; tryCount++ {
|
||||
// Restore request body to the original state
|
||||
if requestBody != nil {
|
||||
req.Body = ioutil.NopCloser(bytes.NewBuffer(requestBody))
|
||||
}
|
||||
|
||||
resp, err := c.client.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
resp.Body.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if resp.StatusCode == http.StatusOK {
|
||||
if data != nil {
|
||||
// avoid unmarshalling problem because Vultr API returns
|
||||
// empty array instead of empty map when it shouldn't!
|
||||
if string(body) == `[]` {
|
||||
data = nil
|
||||
} else {
|
||||
if err := json.Unmarshal(body, data); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
apiError = errors.New(string(body))
|
||||
if !isCodeRetryable(resp.StatusCode) {
|
||||
break
|
||||
}
|
||||
|
||||
delay := backoffDuration(tryCount)
|
||||
time.Sleep(delay)
|
||||
}
|
||||
|
||||
return apiError
|
||||
}
|
||||
|
||||
// backoffDuration returns the duration to wait before retrying the request.
|
||||
// Duration is an exponential function of the retry count with a jitter of ~0-30%.
|
||||
func backoffDuration(retryCount int) time.Duration {
|
||||
// Upper limit of delay at ~1 minute
|
||||
if retryCount > 7 {
|
||||
retryCount = 7
|
||||
}
|
||||
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
delay := (1 << uint(retryCount)) * (rand.Intn(150) + 500)
|
||||
return time.Duration(delay) * time.Millisecond
|
||||
}
|
||||
|
||||
// isCodeRetryable returns true if the given status code means that we should retry.
|
||||
func isCodeRetryable(statusCode int) bool {
|
||||
if _, ok := retryableStatusCodes[statusCode]; ok {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
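Illustrative usage (not part of the diff): a sketch of constructing the Vultr client being removed above, with explicit rate-limit and retry options.

package main

import (
    "fmt"
    "time"

    "github.com/JamesClonk/vultr/lib"
)

func main() {
    client := lib.NewClient("my-api-key", &lib.Options{
        RateLimitation: time.Second, // throttle to roughly one API call per second
        MaxRetries:     2,           // retry 500/503 responses with exponential backoff
    })

    info, err := client.GetAccountInfo()
    if err != nil {
        panic(err)
    }
    fmt.Printf("balance: %.2f, pending charges: %.2f\n", info.Balance, info.PendingCharges)
}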
150
vendor/github.com/JamesClonk/vultr/lib/dns.go
generated
vendored
@@ -1,150 +0,0 @@
|
||||
package lib
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// DNSDomain represents a DNS domain on Vultr
|
||||
type DNSDomain struct {
|
||||
Domain string `json:"domain"`
|
||||
Created string `json:"date_created"`
|
||||
}
|
||||
|
||||
type dnsdomains []DNSDomain
|
||||
|
||||
func (d dnsdomains) Len() int { return len(d) }
|
||||
func (d dnsdomains) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
|
||||
func (d dnsdomains) Less(i, j int) bool {
|
||||
return strings.ToLower(d[i].Domain) < strings.ToLower(d[j].Domain)
|
||||
}
|
||||
|
||||
// DNSRecord represents a DNS record on Vultr
|
||||
type DNSRecord struct {
|
||||
RecordID int `json:"RECORDID"`
|
||||
Type string `json:"type"`
|
||||
Name string `json:"name"`
|
||||
Data string `json:"data"`
|
||||
Priority int `json:"priority"`
|
||||
TTL int `json:"ttl"`
|
||||
}
|
||||
|
||||
type dnsrecords []DNSRecord
|
||||
|
||||
func (d dnsrecords) Len() int { return len(d) }
|
||||
func (d dnsrecords) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
|
||||
func (d dnsrecords) Less(i, j int) bool {
|
||||
// sort order: type, data, name
|
||||
if d[i].Type < d[j].Type {
|
||||
return true
|
||||
} else if d[i].Type > d[j].Type {
|
||||
return false
|
||||
}
|
||||
if d[i].Data < d[j].Data {
|
||||
return true
|
||||
} else if d[i].Data > d[j].Data {
|
||||
return false
|
||||
}
|
||||
return strings.ToLower(d[i].Name) < strings.ToLower(d[j].Name)
|
||||
}
|
||||
|
||||
// GetDNSDomains returns a list of available domains on Vultr account
|
||||
func (c *Client) GetDNSDomains() (domains []DNSDomain, err error) {
|
||||
if err := c.get(`dns/list`, &domains); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sort.Sort(dnsdomains(domains))
|
||||
return domains, nil
|
||||
}
|
||||
|
||||
// GetDNSRecords returns a list of all DNS records of a particular domain
|
||||
func (c *Client) GetDNSRecords(domain string) (records []DNSRecord, err error) {
|
||||
if err := c.get(`dns/records?domain=`+domain, &records); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sort.Sort(dnsrecords(records))
|
||||
return records, nil
|
||||
}
|
||||
|
||||
// CreateDNSDomain creates a new DNS domain name on Vultr
|
||||
func (c *Client) CreateDNSDomain(domain, serverIP string) error {
|
||||
values := url.Values{
|
||||
"domain": {domain},
|
||||
"serverip": {serverIP},
|
||||
}
|
||||
|
||||
if err := c.post(`dns/create_domain`, values, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteDNSDomain deletes an existing DNS domain name
|
||||
func (c *Client) DeleteDNSDomain(domain string) error {
|
||||
values := url.Values{
|
||||
"domain": {domain},
|
||||
}
|
||||
|
||||
if err := c.post(`dns/delete_domain`, values, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CreateDNSRecord creates a new DNS record
|
||||
func (c *Client) CreateDNSRecord(domain, name, rtype, data string, priority, ttl int) error {
|
||||
values := url.Values{
|
||||
"domain": {domain},
|
||||
"name": {name},
|
||||
"type": {rtype},
|
||||
"data": {data},
|
||||
"priority": {fmt.Sprintf("%v", priority)},
|
||||
"ttl": {fmt.Sprintf("%v", ttl)},
|
||||
}
|
||||
|
||||
if err := c.post(`dns/create_record`, values, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateDNSRecord updates an existing DNS record
|
||||
func (c *Client) UpdateDNSRecord(domain string, dnsrecord DNSRecord) error {
|
||||
values := url.Values{
|
||||
"domain": {domain},
|
||||
"RECORDID": {fmt.Sprintf("%v", dnsrecord.RecordID)},
|
||||
}
|
||||
|
||||
if dnsrecord.Name != "" {
|
||||
values.Add("name", dnsrecord.Name)
|
||||
}
|
||||
if dnsrecord.Data != "" {
|
||||
values.Add("data", dnsrecord.Data)
|
||||
}
|
||||
if dnsrecord.Priority != 0 {
|
||||
values.Add("priority", fmt.Sprintf("%v", dnsrecord.Priority))
|
||||
}
|
||||
if dnsrecord.TTL != 0 {
|
||||
values.Add("ttl", fmt.Sprintf("%v", dnsrecord.TTL))
|
||||
}
|
||||
|
||||
if err := c.post(`dns/update_record`, values, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteDNSRecord deletes an existing DNS record
|
||||
func (c *Client) DeleteDNSRecord(domain string, recordID int) error {
|
||||
values := url.Values{
|
||||
"domain": {domain},
|
||||
"RECORDID": {fmt.Sprintf("%v", recordID)},
|
||||
}
|
||||
|
||||
if err := c.post(`dns/delete_record`, values, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
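Illustrative usage (not part of the diff): a sketch of the DNS helpers being removed above; the domain and address are placeholder example values.

package main

import "github.com/JamesClonk/vultr/lib"

func main() {
    client := lib.NewClient("my-api-key", nil)

    // Add an A record for www.example.com pointing at 203.0.113.10 with a 300 second TTL.
    if err := client.CreateDNSRecord("example.com", "www", "A", "203.0.113.10", 0, 300); err != nil {
        panic(err)
    }
}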
248
vendor/github.com/JamesClonk/vultr/lib/firewall.go
generated
vendored
@@ -1,248 +0,0 @@
|
||||
package lib
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/url"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// FirewallGroup represents a firewall group on Vultr
|
||||
type FirewallGroup struct {
|
||||
ID string `json:"FIREWALLGROUPID"`
|
||||
Description string `json:"description"`
|
||||
Created string `json:"date_created"`
|
||||
Modified string `json:"date_modified"`
|
||||
InstanceCount int `json:"instance_count"`
|
||||
RuleCount int `json:"rule_count"`
|
||||
MaxRuleCount int `json:"max_rule_count"`
|
||||
}
|
||||
|
||||
// FirewallRule represents a firewall rule on Vultr
|
||||
type FirewallRule struct {
|
||||
RuleNumber int `json:"rulenumber"`
|
||||
Action string `json:"action"`
|
||||
Protocol string `json:"protocol"`
|
||||
Port string `json:"port"`
|
||||
Network *net.IPNet
|
||||
}
|
||||
|
||||
type firewallGroups []FirewallGroup
|
||||
|
||||
func (f firewallGroups) Len() int { return len(f) }
|
||||
func (f firewallGroups) Swap(i, j int) { f[i], f[j] = f[j], f[i] }
|
||||
func (f firewallGroups) Less(i, j int) bool {
|
||||
// sort order: description
|
||||
return strings.ToLower(f[i].Description) < strings.ToLower(f[j].Description)
|
||||
}
|
||||
|
||||
type firewallRules []FirewallRule
|
||||
|
||||
func (r firewallRules) Len() int { return len(r) }
|
||||
func (r firewallRules) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
|
||||
func (r firewallRules) Less(i, j int) bool {
|
||||
// sort order: rule number
|
||||
return r[i].RuleNumber < r[j].RuleNumber
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements json.Unmarshaller on FirewallRule.
|
||||
// This is needed because the Vultr API is inconsistent in its JSON responses.
|
||||
// Some fields can change type, from JSON number to JSON string and vice-versa.
|
||||
func (r *FirewallRule) UnmarshalJSON(data []byte) (err error) {
|
||||
if r == nil {
|
||||
*r = FirewallRule{}
|
||||
}
|
||||
|
||||
var fields map[string]interface{}
|
||||
if err := json.Unmarshal(data, &fields); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
value := fmt.Sprintf("%v", fields["rulenumber"])
|
||||
if len(value) == 0 || value == "<nil>" {
|
||||
value = "0"
|
||||
}
|
||||
number, err := strconv.ParseInt(value, 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.RuleNumber = int(number)
|
||||
|
||||
value = fmt.Sprintf("%v", fields["subnet_size"])
|
||||
if len(value) == 0 || value == "<nil>" {
|
||||
value = "0"
|
||||
}
|
||||
subnetSize, err := strconv.ParseInt(value, 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.Action = fmt.Sprintf("%v", fields["action"])
|
||||
r.Protocol = fmt.Sprintf("%v", fields["protocol"])
|
||||
r.Port = fmt.Sprintf("%v", fields["port"])
|
||||
subnet := fmt.Sprintf("%v", fields["subnet"])
|
||||
|
||||
if subnetSize > 0 && len(subnet) > 0 {
|
||||
_, r.Network, err = net.ParseCIDR(fmt.Sprintf("%s/%d", subnet, subnetSize))
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to parse subnet from Vultr API")
|
||||
}
|
||||
} else {
|
||||
_, r.Network, _ = net.ParseCIDR("0.0.0.0/0")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// GetFirewallGroups returns a list of all available firewall groups on Vultr
|
||||
func (c *Client) GetFirewallGroups() ([]FirewallGroup, error) {
|
||||
var groupMap map[string]FirewallGroup
|
||||
if err := c.get(`firewall/group_list`, &groupMap); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var groupList []FirewallGroup
|
||||
for _, g := range groupMap {
|
||||
groupList = append(groupList, g)
|
||||
}
|
||||
sort.Sort(firewallGroups(groupList))
|
||||
return groupList, nil
|
||||
}
|
||||
|
||||
// GetFirewallGroup returns the firewall group with given ID
|
||||
func (c *Client) GetFirewallGroup(id string) (FirewallGroup, error) {
|
||||
groups, err := c.GetFirewallGroups()
|
||||
if err != nil {
|
||||
return FirewallGroup{}, err
|
||||
}
|
||||
|
||||
for _, g := range groups {
|
||||
if g.ID == id {
|
||||
return g, nil
|
||||
}
|
||||
}
|
||||
return FirewallGroup{}, fmt.Errorf("Firewall group with ID %v not found", id)
|
||||
}
|
||||
|
||||
// CreateFirewallGroup creates a new firewall group in Vultr account
|
||||
func (c *Client) CreateFirewallGroup(description string) (string, error) {
|
||||
values := url.Values{}
|
||||
|
||||
// Optional description
|
||||
if len(description) > 0 {
|
||||
values.Add("description", description)
|
||||
}
|
||||
|
||||
var result FirewallGroup
|
||||
err := c.post(`firewall/group_create`, values, &result)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return result.ID, nil
|
||||
}
|
||||
|
||||
// DeleteFirewallGroup deletes an existing firewall group
|
||||
func (c *Client) DeleteFirewallGroup(groupID string) error {
|
||||
values := url.Values{
|
||||
"FIREWALLGROUPID": {groupID},
|
||||
}
|
||||
|
||||
if err := c.post(`firewall/group_delete`, values, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetFirewallGroupDescription sets the description of an existing firewall group
|
||||
func (c *Client) SetFirewallGroupDescription(groupID, description string) error {
|
||||
values := url.Values{
|
||||
"FIREWALLGROUPID": {groupID},
|
||||
"description": {description},
|
||||
}
|
||||
|
||||
if err := c.post(`firewall/group_set_description`, values, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetFirewallRules returns a list of rules for the given firewall group
|
||||
func (c *Client) GetFirewallRules(groupID string) ([]FirewallRule, error) {
|
||||
var ruleMap map[string]FirewallRule
|
||||
ipTypes := []string{"v4", "v6"}
|
||||
for _, ipType := range ipTypes {
|
||||
args := fmt.Sprintf("direction=in&FIREWALLGROUPID=%s&ip_type=%s",
|
||||
groupID, ipType)
|
||||
if err := c.get(`firewall/rule_list?`+args, &ruleMap); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
var ruleList []FirewallRule
|
||||
for _, r := range ruleMap {
|
||||
ruleList = append(ruleList, r)
|
||||
}
|
||||
sort.Sort(firewallRules(ruleList))
|
||||
return ruleList, nil
|
||||
}
|
||||
|
||||
// CreateFirewallRule creates a new firewall rule in Vultr account.
|
||||
// groupID is the ID of the firewall group to create the rule in
|
||||
// protocol must be one of: "icmp", "tcp", "udp", "gre"
|
||||
// port can be a port number or colon separated port range (TCP/UDP only)
|
||||
func (c *Client) CreateFirewallRule(groupID, protocol, port string,
|
||||
network *net.IPNet) (int, error) {
|
||||
ip := network.IP.String()
|
||||
maskBits, _ := network.Mask.Size()
|
||||
if ip == "<nil>" {
|
||||
return 0, fmt.Errorf("Invalid network")
|
||||
}
|
||||
|
||||
var ipType string
|
||||
if network.IP.To4() != nil {
|
||||
ipType = "v4"
|
||||
} else {
|
||||
ipType = "v6"
|
||||
}
|
||||
|
||||
values := url.Values{
|
||||
"FIREWALLGROUPID": {groupID},
|
||||
// possible values: "in"
|
||||
"direction": {"in"},
|
||||
// possible values: "icmp", "tcp", "udp", "gre"
|
||||
"protocol": {protocol},
|
||||
// possible values: "v4", "v6"
|
||||
"ip_type": {ipType},
|
||||
// IP address representing a subnet
|
||||
"subnet": {ip},
|
||||
// IP prefix size in bits
|
||||
"subnet_size": {fmt.Sprintf("%v", maskBits)},
|
||||
}
|
||||
|
||||
if len(port) > 0 {
|
||||
values.Add("port", port)
|
||||
}
|
||||
|
||||
var result FirewallRule
|
||||
err := c.post(`firewall/rule_create`, values, &result)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return result.RuleNumber, nil
|
||||
}
|
||||
|
||||
// DeleteFirewallRule deletes an existing firewall rule
|
||||
func (c *Client) DeleteFirewallRule(ruleNumber int, groupID string) error {
|
||||
values := url.Values{
|
||||
"FIREWALLGROUPID": {groupID},
|
||||
"rulenumber": {fmt.Sprintf("%v", ruleNumber)},
|
||||
}
|
||||
|
||||
if err := c.post(`firewall/rule_delete`, values, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
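Taken together, the firewall methods above form a small create/attach/list workflow. A hedged sketch follows; `lib.NewClient` and its arguments are assumptions not shown in this diff, while every method call is one defined above:

```go
package main

import (
	"fmt"
	"log"
	"net"

	"github.com/JamesClonk/vultr/lib"
)

func main() {
	// Assumed constructor; client construction is not part of the vendored file above.
	c := lib.NewClient("VULTR-API-KEY", nil)

	groupID, err := c.CreateFirewallGroup("web servers")
	if err != nil {
		log.Fatal(err)
	}

	// Allow HTTPS from any IPv4 address.
	_, anywhere, _ := net.ParseCIDR("0.0.0.0/0")
	ruleNumber, err := c.CreateFirewallRule(groupID, "tcp", "443", anywhere)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created rule", ruleNumber)

	rules, err := c.GetFirewallRules(groupID)
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range rules {
		fmt.Printf("#%d %s %s %s %s\n", r.RuleNumber, r.Action, r.Protocol, r.Port, r.Network)
	}
}
```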
192  vendor/github.com/JamesClonk/vultr/lib/ip.go  (generated, vendored)
@@ -1,192 +0,0 @@
|
||||
package lib
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// IPv4 information of a virtual machine
|
||||
type IPv4 struct {
|
||||
IP string `json:"ip"`
|
||||
Netmask string `json:"netmask"`
|
||||
Gateway string `json:"gateway"`
|
||||
Type string `json:"type"`
|
||||
ReverseDNS string `json:"reverse"`
|
||||
}
|
||||
|
||||
type ipv4s []IPv4
|
||||
|
||||
func (s ipv4s) Len() int { return len(s) }
|
||||
func (s ipv4s) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
func (s ipv4s) Less(i, j int) bool {
|
||||
// sort order: type, ip
|
||||
if s[i].Type < s[j].Type {
|
||||
return true
|
||||
} else if s[i].Type > s[j].Type {
|
||||
return false
|
||||
}
|
||||
return s[i].IP < s[j].IP
|
||||
}
|
||||
|
||||
// IPv6 information of a virtual machine
|
||||
type IPv6 struct {
|
||||
IP string `json:"ip"`
|
||||
Network string `json:"network"`
|
||||
NetworkSize string `json:"network_size"`
|
||||
Type string `json:"type"`
|
||||
}
|
||||
|
||||
type ipv6s []IPv6
|
||||
|
||||
func (s ipv6s) Len() int { return len(s) }
|
||||
func (s ipv6s) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
func (s ipv6s) Less(i, j int) bool {
|
||||
// sort order: type, ip
|
||||
if s[i].Type < s[j].Type {
|
||||
return true
|
||||
} else if s[i].Type > s[j].Type {
|
||||
return false
|
||||
}
|
||||
return s[i].IP < s[j].IP
|
||||
}
|
||||
|
||||
// ReverseDNSIPv6 information of a virtual machine
|
||||
type ReverseDNSIPv6 struct {
|
||||
IP string `json:"ip"`
|
||||
ReverseDNS string `json:"reverse"`
|
||||
}
|
||||
|
||||
type reverseDNSIPv6s []ReverseDNSIPv6
|
||||
|
||||
func (s reverseDNSIPv6s) Len() int { return len(s) }
|
||||
func (s reverseDNSIPv6s) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
func (s reverseDNSIPv6s) Less(i, j int) bool { return s[i].IP < s[j].IP }
|
||||
|
||||
// ListIPv4 lists the IPv4 information of a virtual machine
|
||||
func (c *Client) ListIPv4(id string) (list []IPv4, err error) {
|
||||
var ipMap map[string][]IPv4
|
||||
if err := c.get(`server/list_ipv4?SUBID=`+id, &ipMap); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, iplist := range ipMap {
|
||||
for _, ip := range iplist {
|
||||
list = append(list, ip)
|
||||
}
|
||||
}
|
||||
sort.Sort(ipv4s(list))
|
||||
return list, nil
|
||||
}
|
||||
|
||||
// CreateIPv4 creates an IPv4 address and attaches it to a virtual machine
|
||||
func (c *Client) CreateIPv4(id string, reboot bool) error {
|
||||
values := url.Values{
|
||||
"SUBID": {id},
|
||||
"reboot": {fmt.Sprintf("%t", reboot)},
|
||||
}
|
||||
|
||||
if err := c.post(`server/create_ipv4`, values, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteIPv4 deletes an IPv4 address and detaches it from a virtual machine
|
||||
func (c *Client) DeleteIPv4(id, ip string) error {
|
||||
values := url.Values{
|
||||
"SUBID": {id},
|
||||
"ip": {ip},
|
||||
}
|
||||
|
||||
if err := c.post(`server/destroy_ipv4`, values, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ListIPv6 lists the IPv6 information of a virtual machine
|
||||
func (c *Client) ListIPv6(id string) (list []IPv6, err error) {
|
||||
var ipMap map[string][]IPv6
|
||||
if err := c.get(`server/list_ipv6?SUBID=`+id, &ipMap); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, iplist := range ipMap {
|
||||
for _, ip := range iplist {
|
||||
list = append(list, ip)
|
||||
}
|
||||
}
|
||||
sort.Sort(ipv6s(list))
|
||||
return list, nil
|
||||
}
|
||||
|
||||
// ListIPv6ReverseDNS lists the IPv6 reverse DNS entries of a virtual machine
|
||||
func (c *Client) ListIPv6ReverseDNS(id string) (list []ReverseDNSIPv6, err error) {
|
||||
var ipMap map[string][]ReverseDNSIPv6
|
||||
if err := c.get(`server/reverse_list_ipv6?SUBID=`+id, &ipMap); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, iplist := range ipMap {
|
||||
for _, ip := range iplist {
|
||||
list = append(list, ip)
|
||||
}
|
||||
}
|
||||
sort.Sort(reverseDNSIPv6s(list))
|
||||
return list, nil
|
||||
}
|
||||
|
||||
// DeleteIPv6ReverseDNS removes a reverse DNS entry for an IPv6 address of a virtual machine
|
||||
func (c *Client) DeleteIPv6ReverseDNS(id string, ip string) error {
|
||||
values := url.Values{
|
||||
"SUBID": {id},
|
||||
"ip": {ip},
|
||||
}
|
||||
|
||||
if err := c.post(`server/reverse_delete_ipv6`, values, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetIPv6ReverseDNS sets a reverse DNS entry for an IPv6 address of a virtual machine
|
||||
func (c *Client) SetIPv6ReverseDNS(id, ip, entry string) error {
|
||||
values := url.Values{
|
||||
"SUBID": {id},
|
||||
"ip": {ip},
|
||||
"entry": {entry},
|
||||
}
|
||||
|
||||
if err := c.post(`server/reverse_set_ipv6`, values, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DefaultIPv4ReverseDNS sets a reverse DNS entry for an IPv4 address of a virtual machine to the original setting
|
||||
func (c *Client) DefaultIPv4ReverseDNS(id, ip string) error {
|
||||
values := url.Values{
|
||||
"SUBID": {id},
|
||||
"ip": {ip},
|
||||
}
|
||||
|
||||
if err := c.post(`server/reverse_default_ipv4`, values, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetIPv4ReverseDNS sets a reverse DNS entry for an IPv4 address of a virtual machine
|
||||
func (c *Client) SetIPv4ReverseDNS(id, ip, entry string) error {
|
||||
values := url.Values{
|
||||
"SUBID": {id},
|
||||
"ip": {ip},
|
||||
"entry": {entry},
|
||||
}
|
||||
|
||||
if err := c.post(`server/reverse_set_ipv4`, values, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
44  vendor/github.com/JamesClonk/vultr/lib/iso.go  (generated, vendored)
@@ -1,44 +0,0 @@
|
||||
package lib
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ISO image on Vultr
|
||||
type ISO struct {
|
||||
ID int `json:"ISOID"`
|
||||
Created string `json:"date_created"`
|
||||
Filename string `json:"filename"`
|
||||
Size int `json:"size"`
|
||||
MD5sum string `json:"md5sum"`
|
||||
}
|
||||
|
||||
type isos []ISO
|
||||
|
||||
func (s isos) Len() int { return len(s) }
|
||||
func (s isos) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
func (s isos) Less(i, j int) bool {
|
||||
// sort order: filename, created
|
||||
if strings.ToLower(s[i].Filename) < strings.ToLower(s[j].Filename) {
|
||||
return true
|
||||
} else if strings.ToLower(s[i].Filename) > strings.ToLower(s[j].Filename) {
|
||||
return false
|
||||
}
|
||||
return s[i].Created < s[j].Created
|
||||
}
|
||||
|
||||
// GetISO returns a list of all ISO images on Vultr account
|
||||
func (c *Client) GetISO() ([]ISO, error) {
|
||||
var isoMap map[string]ISO
|
||||
if err := c.get(`iso/list`, &isoMap); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var isoList []ISO
|
||||
for _, iso := range isoMap {
|
||||
isoList = append(isoList, iso)
|
||||
}
|
||||
sort.Sort(isos(isoList))
|
||||
return isoList, nil
|
||||
}
|
37  vendor/github.com/JamesClonk/vultr/lib/os.go  (generated, vendored)
@@ -1,37 +0,0 @@
|
||||
package lib
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// OS image on Vultr
|
||||
type OS struct {
|
||||
ID int `json:"OSID"`
|
||||
Name string `json:"name"`
|
||||
Arch string `json:"arch"`
|
||||
Family string `json:"family"`
|
||||
Windows bool `json:"windows"`
|
||||
Surcharge string `json:"surcharge"`
|
||||
}
|
||||
|
||||
type oses []OS
|
||||
|
||||
func (s oses) Len() int { return len(s) }
|
||||
func (s oses) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
func (s oses) Less(i, j int) bool { return strings.ToLower(s[i].Name) < strings.ToLower(s[j].Name) }
|
||||
|
||||
// GetOS returns a list of all available operating systems on Vultr
|
||||
func (c *Client) GetOS() ([]OS, error) {
|
||||
var osMap map[string]OS
|
||||
if err := c.get(`os/list`, &osMap); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var osList []OS
|
||||
for _, os := range osMap {
|
||||
osList = append(osList, os)
|
||||
}
|
||||
sort.Sort(oses(osList))
|
||||
return osList, nil
|
||||
}
|
78  vendor/github.com/JamesClonk/vultr/lib/plans.go  (generated, vendored)
@@ -1,78 +0,0 @@
|
||||
package lib
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Plan on Vultr
|
||||
type Plan struct {
|
||||
ID int `json:"VPSPLANID,string"`
|
||||
Name string `json:"name"`
|
||||
VCpus int `json:"vcpu_count,string"`
|
||||
RAM string `json:"ram"`
|
||||
Disk string `json:"disk"`
|
||||
Bandwidth string `json:"bandwidth"`
|
||||
Price string `json:"price_per_month"`
|
||||
Regions []int `json:"available_locations"`
|
||||
}
|
||||
|
||||
type plans []Plan
|
||||
|
||||
func (p plans) Len() int { return len(p) }
|
||||
func (p plans) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
||||
func (p plans) Less(i, j int) bool {
|
||||
pa, _ := strconv.ParseFloat(strings.TrimSpace(p[i].Price), 64)
|
||||
pb, _ := strconv.ParseFloat(strings.TrimSpace(p[j].Price), 64)
|
||||
ra, _ := strconv.ParseInt(strings.TrimSpace(p[i].RAM), 10, 64)
|
||||
rb, _ := strconv.ParseInt(strings.TrimSpace(p[j].RAM), 10, 64)
|
||||
da, _ := strconv.ParseInt(strings.TrimSpace(p[i].Disk), 10, 64)
|
||||
db, _ := strconv.ParseInt(strings.TrimSpace(p[j].Disk), 10, 64)
|
||||
|
||||
// sort order: price, vcpu, ram, disk
|
||||
if pa < pb {
|
||||
return true
|
||||
} else if pa > pb {
|
||||
return false
|
||||
}
|
||||
|
||||
if p[i].VCpus < p[j].VCpus {
|
||||
return true
|
||||
} else if p[i].VCpus > p[j].VCpus {
|
||||
return false
|
||||
}
|
||||
|
||||
if ra < rb {
|
||||
return true
|
||||
} else if ra > rb {
|
||||
return false
|
||||
}
|
||||
|
||||
return da < db
|
||||
}
|
||||
|
||||
// GetPlans returns a list of all available plans on Vultr account
|
||||
func (c *Client) GetPlans() ([]Plan, error) {
|
||||
var planMap map[string]Plan
|
||||
if err := c.get(`plans/list`, &planMap); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var p plans
|
||||
for _, plan := range planMap {
|
||||
p = append(p, plan)
|
||||
}
|
||||
|
||||
sort.Sort(plans(p))
|
||||
return p, nil
|
||||
}
|
||||
|
||||
// GetAvailablePlansForRegion returns available plans for specified region
|
||||
func (c *Client) GetAvailablePlansForRegion(id int) (planIDs []int, err error) {
|
||||
if err := c.get(fmt.Sprintf(`regions/availability?DCID=%v`, id), &planIDs); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return
|
||||
}
|
44  vendor/github.com/JamesClonk/vultr/lib/regions.go  (generated, vendored)
@@ -1,44 +0,0 @@
|
||||
package lib
|
||||
|
||||
import "sort"
|
||||
|
||||
// Region on Vultr
|
||||
type Region struct {
|
||||
ID int `json:"DCID,string"`
|
||||
Name string `json:"name"`
|
||||
Country string `json:"country"`
|
||||
Continent string `json:"continent"`
|
||||
State string `json:"state"`
|
||||
Ddos bool `json:"ddos_protection"`
|
||||
BlockStorage bool `json:"block_storage"`
|
||||
Code string `json:"regioncode"`
|
||||
}
|
||||
|
||||
type regions []Region
|
||||
|
||||
func (s regions) Len() int { return len(s) }
|
||||
func (s regions) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
func (s regions) Less(i, j int) bool {
|
||||
// sort order: continent, name
|
||||
if s[i].Continent < s[j].Continent {
|
||||
return true
|
||||
} else if s[i].Continent > s[j].Continent {
|
||||
return false
|
||||
}
|
||||
return s[i].Name < s[j].Name
|
||||
}
|
||||
|
||||
// GetRegions returns a list of all available Vultr regions
|
||||
func (c *Client) GetRegions() ([]Region, error) {
|
||||
var regionMap map[string]Region
|
||||
if err := c.get(`regions/list`, &regionMap); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var regionList []Region
|
||||
for _, os := range regionMap {
|
||||
regionList = append(regionList, os)
|
||||
}
|
||||
sort.Sort(regions(regionList))
|
||||
return regionList, nil
|
||||
}
|
192  vendor/github.com/JamesClonk/vultr/lib/reservedip.go  (generated, vendored)
@@ -1,192 +0,0 @@
|
||||
package lib
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// IP on Vultr
|
||||
type IP struct {
|
||||
ID string `json:"SUBID,string"`
|
||||
RegionID int `json:"DCID,string"`
|
||||
IPType string `json:"ip_type"`
|
||||
Subnet string `json:"subnet"`
|
||||
SubnetSize int `json:"subnet_size"`
|
||||
Label string `json:"label"`
|
||||
AttachedTo string `json:"attached_SUBID,string"`
|
||||
}
|
||||
|
||||
type ips []IP
|
||||
|
||||
func (s ips) Len() int { return len(s) }
|
||||
func (s ips) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
func (s ips) Less(i, j int) bool {
|
||||
// sort order: label, iptype, subnet
|
||||
if strings.ToLower(s[i].Label) < strings.ToLower(s[j].Label) {
|
||||
return true
|
||||
} else if strings.ToLower(s[i].Label) > strings.ToLower(s[j].Label) {
|
||||
return false
|
||||
}
|
||||
if s[i].IPType < s[j].IPType {
|
||||
return true
|
||||
} else if s[i].IPType > s[j].IPType {
|
||||
return false
|
||||
}
|
||||
return s[i].Subnet < s[j].Subnet
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements json.Unmarshaler on IP.
|
||||
// This is needed because the Vultr API is inconsistent in its JSON responses.
|
||||
// Some fields can change type, from JSON number to JSON string and vice-versa.
|
||||
func (i *IP) UnmarshalJSON(data []byte) (err error) {
|
||||
if i == nil {
|
||||
*i = IP{}
|
||||
}
|
||||
|
||||
var fields map[string]interface{}
|
||||
if err := json.Unmarshal(data, &fields); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
value := fmt.Sprintf("%v", fields["SUBID"])
|
||||
if len(value) == 0 || value == "<nil>" || value == "0" {
|
||||
i.ID = ""
|
||||
} else {
|
||||
id, err := strconv.ParseFloat(value, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
i.ID = strconv.FormatFloat(id, 'f', -1, 64)
|
||||
}
|
||||
|
||||
value = fmt.Sprintf("%v", fields["DCID"])
|
||||
if len(value) == 0 || value == "<nil>" {
|
||||
value = "0"
|
||||
}
|
||||
region, err := strconv.ParseInt(value, 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
i.RegionID = int(region)
|
||||
|
||||
value = fmt.Sprintf("%v", fields["attached_SUBID"])
|
||||
if len(value) == 0 || value == "<nil>" || value == "0" || value == "false" {
|
||||
i.AttachedTo = ""
|
||||
} else {
|
||||
attached, err := strconv.ParseFloat(value, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
i.AttachedTo = strconv.FormatFloat(attached, 'f', -1, 64)
|
||||
}
|
||||
|
||||
value = fmt.Sprintf("%v", fields["subnet_size"])
|
||||
if len(value) == 0 || value == "<nil>" {
|
||||
value = "0"
|
||||
}
|
||||
size, err := strconv.ParseInt(value, 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
i.SubnetSize = int(size)
|
||||
|
||||
i.IPType = fmt.Sprintf("%v", fields["ip_type"])
|
||||
i.Subnet = fmt.Sprintf("%v", fields["subnet"])
|
||||
i.Label = fmt.Sprintf("%v", fields["label"])
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// ListReservedIP returns a list of all available reserved IPs on Vultr account
|
||||
func (c *Client) ListReservedIP() ([]IP, error) {
|
||||
var ipMap map[string]IP
|
||||
|
||||
err := c.get(`reservedip/list`, &ipMap)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ipList := make([]IP, 0)
|
||||
for _, ip := range ipMap {
|
||||
ipList = append(ipList, ip)
|
||||
}
|
||||
sort.Sort(ips(ipList))
|
||||
return ipList, nil
|
||||
}
|
||||
|
||||
// GetReservedIP returns reserved IP with given ID
|
||||
func (c *Client) GetReservedIP(id string) (IP, error) {
|
||||
var ipMap map[string]IP
|
||||
|
||||
err := c.get(`reservedip/list`, &ipMap)
|
||||
if err != nil {
|
||||
return IP{}, err
|
||||
}
|
||||
if ip, ok := ipMap[id]; ok {
|
||||
return ip, nil
|
||||
}
|
||||
return IP{}, fmt.Errorf("IP with ID %v not found", id)
|
||||
}
|
||||
|
||||
// CreateReservedIP creates a new reserved IP on Vultr account
|
||||
func (c *Client) CreateReservedIP(regionID int, ipType string, label string) (string, error) {
|
||||
values := url.Values{
|
||||
"DCID": {fmt.Sprintf("%v", regionID)},
|
||||
"ip_type": {ipType},
|
||||
}
|
||||
if len(label) > 0 {
|
||||
values.Add("label", label)
|
||||
}
|
||||
|
||||
result := IP{}
|
||||
err := c.post(`reservedip/create`, values, &result)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return result.ID, nil
|
||||
}
|
||||
|
||||
// DestroyReservedIP deletes an existing reserved IP
|
||||
func (c *Client) DestroyReservedIP(id string) error {
|
||||
values := url.Values{
|
||||
"SUBID": {id},
|
||||
}
|
||||
return c.post(`reservedip/destroy`, values, nil)
|
||||
}
|
||||
|
||||
// AttachReservedIP attaches a reserved IP to a virtual machine
|
||||
func (c *Client) AttachReservedIP(ip string, serverID string) error {
|
||||
values := url.Values{
|
||||
"ip_address": {ip},
|
||||
"attach_SUBID": {serverID},
|
||||
}
|
||||
return c.post(`reservedip/attach`, values, nil)
|
||||
}
|
||||
|
||||
// DetachReservedIP detaches a reserved IP from an existing virtual machine
|
||||
func (c *Client) DetachReservedIP(serverID string, ip string) error {
|
||||
values := url.Values{
|
||||
"ip_address": {ip},
|
||||
"detach_SUBID": {serverID},
|
||||
}
|
||||
return c.post(`reservedip/detach`, values, nil)
|
||||
}
|
||||
|
||||
// ConvertReservedIP converts an existing virtual machine's IP to a reserved IP
|
||||
func (c *Client) ConvertReservedIP(serverID string, ip string) (string, error) {
|
||||
values := url.Values{
|
||||
"SUBID": {serverID},
|
||||
"ip_address": {ip},
|
||||
}
|
||||
|
||||
result := IP{}
|
||||
err := c.post(`reservedip/convert`, values, &result)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return result.ID, err
|
||||
}
|
126  vendor/github.com/JamesClonk/vultr/lib/scripts.go  (generated, vendored)
@@ -1,126 +0,0 @@
|
||||
package lib
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// StartupScript on Vultr account
|
||||
type StartupScript struct {
|
||||
ID string `json:"SCRIPTID"`
|
||||
Name string `json:"name"`
|
||||
Type string `json:"type"`
|
||||
Content string `json:"script"`
|
||||
}
|
||||
|
||||
type startupscripts []StartupScript
|
||||
|
||||
func (s startupscripts) Len() int { return len(s) }
|
||||
func (s startupscripts) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
func (s startupscripts) Less(i, j int) bool {
|
||||
return strings.ToLower(s[i].Name) < strings.ToLower(s[j].Name)
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements json.Unmarshaler on StartupScript.
|
||||
// Necessary because the SCRIPTID field has inconsistent types.
|
||||
func (s *StartupScript) UnmarshalJSON(data []byte) (err error) {
|
||||
if s == nil {
|
||||
*s = StartupScript{}
|
||||
}
|
||||
|
||||
var fields map[string]interface{}
|
||||
if err := json.Unmarshal(data, &fields); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.ID = fmt.Sprintf("%v", fields["SCRIPTID"])
|
||||
s.Name = fmt.Sprintf("%v", fields["name"])
|
||||
s.Type = fmt.Sprintf("%v", fields["type"])
|
||||
s.Content = fmt.Sprintf("%v", fields["script"])
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// GetStartupScripts returns a list of all startup scripts on the current Vultr account
|
||||
func (c *Client) GetStartupScripts() (scripts []StartupScript, err error) {
|
||||
var scriptMap map[string]StartupScript
|
||||
if err := c.get(`startupscript/list`, &scriptMap); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, script := range scriptMap {
|
||||
if script.Type == "" {
|
||||
script.Type = "boot" // set default script type
|
||||
}
|
||||
scripts = append(scripts, script)
|
||||
}
|
||||
sort.Sort(startupscripts(scripts))
|
||||
return scripts, nil
|
||||
}
|
||||
|
||||
// GetStartupScript returns the startup script with the given ID
|
||||
func (c *Client) GetStartupScript(id string) (StartupScript, error) {
|
||||
scripts, err := c.GetStartupScripts()
|
||||
if err != nil {
|
||||
return StartupScript{}, err
|
||||
}
|
||||
|
||||
for _, s := range scripts {
|
||||
if s.ID == id {
|
||||
return s, nil
|
||||
}
|
||||
}
|
||||
return StartupScript{}, nil
|
||||
}
|
||||
|
||||
// CreateStartupScript creates a new startup script
|
||||
func (c *Client) CreateStartupScript(name, content, scriptType string) (StartupScript, error) {
|
||||
values := url.Values{
|
||||
"name": {name},
|
||||
"script": {content},
|
||||
"type": {scriptType},
|
||||
}
|
||||
|
||||
var script StartupScript
|
||||
if err := c.post(`startupscript/create`, values, &script); err != nil {
|
||||
return StartupScript{}, err
|
||||
}
|
||||
script.Name = name
|
||||
script.Content = content
|
||||
script.Type = scriptType
|
||||
|
||||
return script, nil
|
||||
}
|
||||
|
||||
// UpdateStartupScript updates an existing startup script
|
||||
func (c *Client) UpdateStartupScript(script StartupScript) error {
|
||||
values := url.Values{
|
||||
"SCRIPTID": {script.ID},
|
||||
}
|
||||
if script.Name != "" {
|
||||
values.Add("name", script.Name)
|
||||
}
|
||||
if script.Content != "" {
|
||||
values.Add("script", script.Content)
|
||||
}
|
||||
|
||||
if err := c.post(`startupscript/update`, values, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteStartupScript deletes an existing startup script from Vultr account
|
||||
func (c *Client) DeleteStartupScript(id string) error {
|
||||
values := url.Values{
|
||||
"SCRIPTID": {id},
|
||||
}
|
||||
|
||||
if err := c.post(`startupscript/destroy`, values, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
561  vendor/github.com/JamesClonk/vultr/lib/servers.go  (generated, vendored)
@@ -1,561 +0,0 @@
|
||||
package lib
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Server (virtual machine) on Vultr account
|
||||
type Server struct {
|
||||
ID string `json:"SUBID"`
|
||||
Name string `json:"label"`
|
||||
OS string `json:"os"`
|
||||
RAM string `json:"ram"`
|
||||
Disk string `json:"disk"`
|
||||
MainIP string `json:"main_ip"`
|
||||
VCpus int `json:"vcpu_count,string"`
|
||||
Location string `json:"location"`
|
||||
RegionID int `json:"DCID,string"`
|
||||
DefaultPassword string `json:"default_password"`
|
||||
Created string `json:"date_created"`
|
||||
PendingCharges float64 `json:"pending_charges"`
|
||||
Status string `json:"status"`
|
||||
Cost string `json:"cost_per_month"`
|
||||
CurrentBandwidth float64 `json:"current_bandwidth_gb"`
|
||||
AllowedBandwidth float64 `json:"allowed_bandwidth_gb,string"`
|
||||
NetmaskV4 string `json:"netmask_v4"`
|
||||
GatewayV4 string `json:"gateway_v4"`
|
||||
PowerStatus string `json:"power_status"`
|
||||
ServerState string `json:"server_state"`
|
||||
PlanID int `json:"VPSPLANID,string"`
|
||||
V6Networks []V6Network `json:"v6_networks"`
|
||||
InternalIP string `json:"internal_ip"`
|
||||
KVMUrl string `json:"kvm_url"`
|
||||
AutoBackups string `json:"auto_backups"`
|
||||
Tag string `json:"tag"`
|
||||
OSID string `json:"OSID"`
|
||||
AppID string `json:"APPID"`
|
||||
FirewallGroupID string `json:"FIREWALLGROUPID"`
|
||||
}
|
||||
|
||||
// ServerOptions are optional parameters to be used during server creation
|
||||
type ServerOptions struct {
|
||||
IPXEChainURL string
|
||||
ISO int
|
||||
Script int
|
||||
UserData string
|
||||
Snapshot string
|
||||
SSHKey string
|
||||
ReservedIP string
|
||||
IPV6 bool
|
||||
PrivateNetworking bool
|
||||
AutoBackups bool
|
||||
DontNotifyOnActivate bool
|
||||
Hostname string
|
||||
Tag string
|
||||
AppID string
|
||||
FirewallGroupID string
|
||||
}
|
||||
|
||||
type servers []Server
|
||||
|
||||
func (s servers) Len() int { return len(s) }
|
||||
func (s servers) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
func (s servers) Less(i, j int) bool {
|
||||
// sort order: name, ip
|
||||
if strings.ToLower(s[i].Name) < strings.ToLower(s[j].Name) {
|
||||
return true
|
||||
} else if strings.ToLower(s[i].Name) > strings.ToLower(s[j].Name) {
|
||||
return false
|
||||
}
|
||||
return s[i].MainIP < s[j].MainIP
|
||||
}
|
||||
|
||||
// V6Network represents an IPv6 network of a Vultr server
|
||||
type V6Network struct {
|
||||
Network string `json:"v6_network"`
|
||||
MainIP string `json:"v6_main_ip"`
|
||||
NetworkSize string `json:"v6_network_size"`
|
||||
}
|
||||
|
||||
// ISOStatus represents an ISO image attached to a Vultr server
|
||||
type ISOStatus struct {
|
||||
State string `json:"state"`
|
||||
ISOID string `json:"ISOID"`
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements json.Unmarshaler on Server.
|
||||
// This is needed because the Vultr API is inconsistent in its JSON responses for servers.
|
||||
// Some fields can change type, from JSON number to JSON string and vice-versa.
|
||||
func (s *Server) UnmarshalJSON(data []byte) (err error) {
|
||||
if s == nil {
|
||||
*s = Server{}
|
||||
}
|
||||
|
||||
var fields map[string]interface{}
|
||||
if err := json.Unmarshal(data, &fields); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
value := fmt.Sprintf("%v", fields["vcpu_count"])
|
||||
if len(value) == 0 || value == "<nil>" {
|
||||
value = "0"
|
||||
}
|
||||
vcpu, err := strconv.ParseInt(value, 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.VCpus = int(vcpu)
|
||||
|
||||
value = fmt.Sprintf("%v", fields["DCID"])
|
||||
if len(value) == 0 || value == "<nil>" {
|
||||
value = "0"
|
||||
}
|
||||
region, err := strconv.ParseInt(value, 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.RegionID = int(region)
|
||||
|
||||
value = fmt.Sprintf("%v", fields["VPSPLANID"])
|
||||
if len(value) == 0 || value == "<nil>" {
|
||||
value = "0"
|
||||
}
|
||||
plan, err := strconv.ParseInt(value, 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.PlanID = int(plan)
|
||||
|
||||
value = fmt.Sprintf("%v", fields["pending_charges"])
|
||||
if len(value) == 0 || value == "<nil>" {
|
||||
value = "0"
|
||||
}
|
||||
pc, err := strconv.ParseFloat(value, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.PendingCharges = pc
|
||||
|
||||
value = fmt.Sprintf("%v", fields["current_bandwidth_gb"])
|
||||
if len(value) == 0 || value == "<nil>" {
|
||||
value = "0"
|
||||
}
|
||||
cb, err := strconv.ParseFloat(value, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.CurrentBandwidth = cb
|
||||
|
||||
value = fmt.Sprintf("%v", fields["allowed_bandwidth_gb"])
|
||||
if len(value) == 0 || value == "<nil>" {
|
||||
value = "0"
|
||||
}
|
||||
ab, err := strconv.ParseFloat(value, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.AllowedBandwidth = ab
|
||||
|
||||
value = fmt.Sprintf("%v", fields["OSID"])
|
||||
if value == "<nil>" {
|
||||
value = ""
|
||||
}
|
||||
s.OSID = value
|
||||
|
||||
value = fmt.Sprintf("%v", fields["APPID"])
|
||||
if value == "<nil>" || value == "0" {
|
||||
value = ""
|
||||
}
|
||||
s.AppID = value
|
||||
|
||||
value = fmt.Sprintf("%v", fields["FIREWALLGROUPID"])
|
||||
if value == "<nil>" || value == "0" {
|
||||
value = ""
|
||||
}
|
||||
s.FirewallGroupID = value
|
||||
|
||||
s.ID = fmt.Sprintf("%v", fields["SUBID"])
|
||||
s.Name = fmt.Sprintf("%v", fields["label"])
|
||||
s.OS = fmt.Sprintf("%v", fields["os"])
|
||||
s.RAM = fmt.Sprintf("%v", fields["ram"])
|
||||
s.Disk = fmt.Sprintf("%v", fields["disk"])
|
||||
s.MainIP = fmt.Sprintf("%v", fields["main_ip"])
|
||||
s.Location = fmt.Sprintf("%v", fields["location"])
|
||||
s.DefaultPassword = fmt.Sprintf("%v", fields["default_password"])
|
||||
s.Created = fmt.Sprintf("%v", fields["date_created"])
|
||||
s.Status = fmt.Sprintf("%v", fields["status"])
|
||||
s.Cost = fmt.Sprintf("%v", fields["cost_per_month"])
|
||||
s.NetmaskV4 = fmt.Sprintf("%v", fields["netmask_v4"])
|
||||
s.GatewayV4 = fmt.Sprintf("%v", fields["gateway_v4"])
|
||||
s.PowerStatus = fmt.Sprintf("%v", fields["power_status"])
|
||||
s.ServerState = fmt.Sprintf("%v", fields["server_state"])
|
||||
|
||||
v6networks := make([]V6Network, 0)
|
||||
if networks, ok := fields["v6_networks"].([]interface{}); ok {
|
||||
for _, network := range networks {
|
||||
if network, ok := network.(map[string]interface{}); ok {
|
||||
v6network := V6Network{
|
||||
Network: fmt.Sprintf("%v", network["v6_network"]),
|
||||
MainIP: fmt.Sprintf("%v", network["v6_main_ip"]),
|
||||
NetworkSize: fmt.Sprintf("%v", network["v6_network_size"]),
|
||||
}
|
||||
v6networks = append(v6networks, v6network)
|
||||
}
|
||||
}
|
||||
s.V6Networks = v6networks
|
||||
}
|
||||
|
||||
s.InternalIP = fmt.Sprintf("%v", fields["internal_ip"])
|
||||
s.KVMUrl = fmt.Sprintf("%v", fields["kvm_url"])
|
||||
s.AutoBackups = fmt.Sprintf("%v", fields["auto_backups"])
|
||||
s.Tag = fmt.Sprintf("%v", fields["tag"])
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// GetServers returns a list of current virtual machines on Vultr account
|
||||
func (c *Client) GetServers() (serverList []Server, err error) {
|
||||
var serverMap map[string]Server
|
||||
if err := c.get(`server/list`, &serverMap); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, server := range serverMap {
|
||||
serverList = append(serverList, server)
|
||||
}
|
||||
sort.Sort(servers(serverList))
|
||||
return serverList, nil
|
||||
}
|
||||
|
||||
// GetServersByTag returns a list of all virtual machines matching the given tag
|
||||
func (c *Client) GetServersByTag(tag string) (serverList []Server, err error) {
|
||||
var serverMap map[string]Server
|
||||
if err := c.get(`server/list?tag=`+tag, &serverMap); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, server := range serverMap {
|
||||
serverList = append(serverList, server)
|
||||
}
|
||||
sort.Sort(servers(serverList))
|
||||
return serverList, nil
|
||||
}
|
||||
|
||||
// GetServer returns the virtual machine with the given ID
|
||||
func (c *Client) GetServer(id string) (server Server, err error) {
|
||||
if err := c.get(`server/list?SUBID=`+id, &server); err != nil {
|
||||
return Server{}, err
|
||||
}
|
||||
return server, nil
|
||||
}
|
||||
|
||||
// CreateServer creates a new virtual machine on Vultr. ServerOptions are optional settings.
|
||||
func (c *Client) CreateServer(name string, regionID, planID, osID int, options *ServerOptions) (Server, error) {
|
||||
values := url.Values{
|
||||
"label": {name},
|
||||
"DCID": {fmt.Sprintf("%v", regionID)},
|
||||
"VPSPLANID": {fmt.Sprintf("%v", planID)},
|
||||
"OSID": {fmt.Sprintf("%v", osID)},
|
||||
}
|
||||
|
||||
if options != nil {
|
||||
if options.IPXEChainURL != "" {
|
||||
values.Add("ipxe_chain_url", options.IPXEChainURL)
|
||||
}
|
||||
|
||||
if options.ISO != 0 {
|
||||
values.Add("ISOID", fmt.Sprintf("%v", options.ISO))
|
||||
}
|
||||
|
||||
if options.Script != 0 {
|
||||
values.Add("SCRIPTID", fmt.Sprintf("%v", options.Script))
|
||||
}
|
||||
|
||||
if options.UserData != "" {
|
||||
values.Add("userdata", base64.StdEncoding.EncodeToString([]byte(options.UserData)))
|
||||
}
|
||||
|
||||
if options.Snapshot != "" {
|
||||
values.Add("SNAPSHOTID", options.Snapshot)
|
||||
}
|
||||
|
||||
if options.SSHKey != "" {
|
||||
values.Add("SSHKEYID", options.SSHKey)
|
||||
}
|
||||
|
||||
if options.ReservedIP != "" {
|
||||
values.Add("reserved_ip_v4", options.ReservedIP)
|
||||
}
|
||||
|
||||
values.Add("enable_ipv6", "no")
|
||||
if options.IPV6 {
|
||||
values.Set("enable_ipv6", "yes")
|
||||
}
|
||||
|
||||
values.Add("enable_private_network", "no")
|
||||
if options.PrivateNetworking {
|
||||
values.Set("enable_private_network", "yes")
|
||||
}
|
||||
|
||||
values.Add("auto_backups", "no")
|
||||
if options.AutoBackups {
|
||||
values.Set("auto_backups", "yes")
|
||||
}
|
||||
|
||||
values.Add("notify_activate", "yes")
|
||||
if options.DontNotifyOnActivate {
|
||||
values.Set("notify_activate", "no")
|
||||
}
|
||||
|
||||
if options.Hostname != "" {
|
||||
values.Add("hostname", options.Hostname)
|
||||
}
|
||||
|
||||
if options.Tag != "" {
|
||||
values.Add("tag", options.Tag)
|
||||
}
|
||||
|
||||
if options.AppID != "" {
|
||||
values.Add("APPID", options.AppID)
|
||||
}
|
||||
|
||||
if options.FirewallGroupID != "" {
|
||||
values.Add("FIREWALLGROUPID", options.FirewallGroupID)
|
||||
}
|
||||
}
|
||||
|
||||
var server Server
|
||||
if err := c.post(`server/create`, values, &server); err != nil {
|
||||
return Server{}, err
|
||||
}
|
||||
server.Name = name
|
||||
server.RegionID = regionID
|
||||
server.PlanID = planID
|
||||
|
||||
return server, nil
|
||||
}
|
||||
|
||||
// RenameServer renames an existing virtual machine
|
||||
func (c *Client) RenameServer(id, name string) error {
|
||||
values := url.Values{
|
||||
"SUBID": {id},
|
||||
"label": {name},
|
||||
}
|
||||
|
||||
if err := c.post(`server/label_set`, values, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// TagServer replaces the tag on an existing virtual machine
|
||||
func (c *Client) TagServer(id, tag string) error {
|
||||
values := url.Values{
|
||||
"SUBID": {id},
|
||||
"tag": {tag},
|
||||
}
|
||||
|
||||
if err := c.post(`server/tag_set`, values, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// StartServer starts an existing virtual machine
|
||||
func (c *Client) StartServer(id string) error {
|
||||
values := url.Values{
|
||||
"SUBID": {id},
|
||||
}
|
||||
|
||||
if err := c.post(`server/start`, values, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// HaltServer stops an existing virtual machine
|
||||
func (c *Client) HaltServer(id string) error {
|
||||
values := url.Values{
|
||||
"SUBID": {id},
|
||||
}
|
||||
|
||||
if err := c.post(`server/halt`, values, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// RebootServer reboots an existing virtual machine
|
||||
func (c *Client) RebootServer(id string) error {
|
||||
values := url.Values{
|
||||
"SUBID": {id},
|
||||
}
|
||||
|
||||
if err := c.post(`server/reboot`, values, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReinstallServer reinstalls the operating system on an existing virtual machine
|
||||
func (c *Client) ReinstallServer(id string) error {
|
||||
values := url.Values{
|
||||
"SUBID": {id},
|
||||
}
|
||||
|
||||
if err := c.post(`server/reinstall`, values, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ChangeOSofServer changes the virtual machine to a different operating system
|
||||
func (c *Client) ChangeOSofServer(id string, osID int) error {
|
||||
values := url.Values{
|
||||
"SUBID": {id},
|
||||
"OSID": {fmt.Sprintf("%v", osID)},
|
||||
}
|
||||
|
||||
if err := c.post(`server/os_change`, values, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ListOSforServer lists all available operating systems to which an existing virtual machine can be changed
|
||||
func (c *Client) ListOSforServer(id string) (os []OS, err error) {
|
||||
var osMap map[string]OS
|
||||
if err := c.get(`server/os_change_list?SUBID=`+id, &osMap); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, o := range osMap {
|
||||
os = append(os, o)
|
||||
}
|
||||
sort.Sort(oses(os))
|
||||
return os, nil
|
||||
}
|
||||
|
||||
// AttachISOtoServer attaches an ISO image to an existing virtual machine and reboots it
|
||||
func (c *Client) AttachISOtoServer(id string, isoID int) error {
|
||||
values := url.Values{
|
||||
"SUBID": {id},
|
||||
"ISOID": {fmt.Sprintf("%v", isoID)},
|
||||
}
|
||||
|
||||
if err := c.post(`server/iso_attach`, values, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DetachISOfromServer detaches the currently mounted ISO image from the virtual machine and reboots it
|
||||
func (c *Client) DetachISOfromServer(id string) error {
|
||||
values := url.Values{
|
||||
"SUBID": {id},
|
||||
}
|
||||
|
||||
if err := c.post(`server/iso_detach`, values, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetISOStatusofServer retrieves the current ISO image state of an existing virtual machine
|
||||
func (c *Client) GetISOStatusofServer(id string) (isoStatus ISOStatus, err error) {
|
||||
if err := c.get(`server/iso_status?SUBID=`+id, &isoStatus); err != nil {
|
||||
return ISOStatus{}, err
|
||||
}
|
||||
return isoStatus, nil
|
||||
}
|
||||
|
||||
// DeleteServer deletes an existing virtual machine
|
||||
func (c *Client) DeleteServer(id string) error {
|
||||
values := url.Values{
|
||||
"SUBID": {id},
|
||||
}
|
||||
|
||||
if err := c.post(`server/destroy`, values, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetFirewallGroup adds a virtual machine to a firewall group
|
||||
func (c *Client) SetFirewallGroup(id, firewallgroup string) error {
|
||||
values := url.Values{
|
||||
"SUBID": {id},
|
||||
"FIREWALLGROUPID": {firewallgroup},
|
||||
}
|
||||
|
||||
if err := c.post(`server/firewall_group_set`, values, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// UnsetFirewallGroup removes a virtual machine from a firewall group
|
||||
func (c *Client) UnsetFirewallGroup(id string) error {
|
||||
return c.SetFirewallGroup(id, "0")
|
||||
}
|
||||
|
||||
// BandwidthOfServer retrieves the bandwidth used by a virtual machine
|
||||
func (c *Client) BandwidthOfServer(id string) (bandwidth []map[string]string, err error) {
|
||||
var bandwidthMap map[string][][]string
|
||||
if err := c.get(`server/bandwidth?SUBID=`+id, &bandwidthMap); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// parse incoming bytes
|
||||
for _, b := range bandwidthMap["incoming_bytes"] {
|
||||
bMap := make(map[string]string)
|
||||
bMap["date"] = b[0]
|
||||
bMap["incoming"] = b[1]
|
||||
bandwidth = append(bandwidth, bMap)
|
||||
}
|
||||
|
||||
// parse outgoing bytes (we'll assume that incoming and outgoing dates are always a match)
|
||||
for _, b := range bandwidthMap["outgoing_bytes"] {
|
||||
for i := range bandwidth {
|
||||
if bandwidth[i]["date"] == b[0] {
|
||||
bandwidth[i]["outgoing"] = b[1]
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return bandwidth, nil
|
||||
}
|
||||
|
||||
// ChangeApplicationofServer changes the virtual machine to a different application
|
||||
func (c *Client) ChangeApplicationofServer(id string, appID string) error {
|
||||
values := url.Values{
|
||||
"SUBID": {id},
|
||||
"APPID": {appID},
|
||||
}
|
||||
|
||||
if err := c.post(`server/app_change`, values, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ListApplicationsforServer lists all available applications to which an existing virtual machine can be changed
|
||||
func (c *Client) ListApplicationsforServer(id string) (apps []Application, err error) {
|
||||
var appMap map[string]Application
|
||||
if err := c.get(`server/app_change_list?SUBID=`+id, &appMap); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, app := range appMap {
|
||||
apps = append(apps, app)
|
||||
}
|
||||
sort.Sort(applications(apps))
|
||||
return apps, nil
|
||||
}
|
72  vendor/github.com/JamesClonk/vultr/lib/snapshots.go  (generated, vendored)
@@ -1,72 +0,0 @@
|
||||
package lib
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Snapshot of a virtual machine on Vultr account
|
||||
type Snapshot struct {
|
||||
ID string `json:"SNAPSHOTID"`
|
||||
Description string `json:"description"`
|
||||
Size string `json:"size"`
|
||||
Status string `json:"status"`
|
||||
Created string `json:"date_created"`
|
||||
}
|
||||
|
||||
type snapshots []Snapshot
|
||||
|
||||
func (s snapshots) Len() int { return len(s) }
|
||||
func (s snapshots) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
func (s snapshots) Less(i, j int) bool {
|
||||
// sort order: description, created
|
||||
if strings.ToLower(s[i].Description) < strings.ToLower(s[j].Description) {
|
||||
return true
|
||||
} else if strings.ToLower(s[i].Description) > strings.ToLower(s[j].Description) {
|
||||
return false
|
||||
}
|
||||
return s[i].Created < s[j].Created
|
||||
}
|
||||
|
||||
// GetSnapshots retrieves a list of all snapshots on Vultr account
|
||||
func (c *Client) GetSnapshots() (snapshotList []Snapshot, err error) {
|
||||
var snapshotMap map[string]Snapshot
|
||||
if err := c.get(`snapshot/list`, &snapshotMap); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, snapshot := range snapshotMap {
|
||||
snapshotList = append(snapshotList, snapshot)
|
||||
}
|
||||
sort.Sort(snapshots(snapshotList))
|
||||
return snapshotList, nil
|
||||
}
|
||||
|
||||
// CreateSnapshot creates a new virtual machine snapshot
|
||||
func (c *Client) CreateSnapshot(id, description string) (Snapshot, error) {
|
||||
values := url.Values{
|
||||
"SUBID": {id},
|
||||
"description": {description},
|
||||
}
|
||||
|
||||
var snapshot Snapshot
|
||||
if err := c.post(`snapshot/create`, values, &snapshot); err != nil {
|
||||
return Snapshot{}, err
|
||||
}
|
||||
snapshot.Description = description
|
||||
|
||||
return snapshot, nil
|
||||
}
|
||||
|
||||
// DeleteSnapshot deletes an existing virtual machine snapshot
|
||||
func (c *Client) DeleteSnapshot(id string) error {
|
||||
values := url.Values{
|
||||
"SNAPSHOTID": {id},
|
||||
}
|
||||
|
||||
if err := c.post(`snapshot/destroy`, values, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
82  vendor/github.com/JamesClonk/vultr/lib/sshkeys.go  (generated, vendored)
@@ -1,82 +0,0 @@
|
||||
package lib
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// SSHKey on Vultr account
|
||||
type SSHKey struct {
|
||||
ID string `json:"SSHKEYID"`
|
||||
Name string `json:"name"`
|
||||
Key string `json:"ssh_key"`
|
||||
Created string `json:"date_created"`
|
||||
}
|
||||
|
||||
type sshkeys []SSHKey
|
||||
|
||||
func (s sshkeys) Len() int { return len(s) }
|
||||
func (s sshkeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
func (s sshkeys) Less(i, j int) bool { return strings.ToLower(s[i].Name) < strings.ToLower(s[j].Name) }
|
||||
|
||||
// GetSSHKeys returns a list of SSHKeys from Vultr account
|
||||
func (c *Client) GetSSHKeys() (keys []SSHKey, err error) {
|
||||
var keyMap map[string]SSHKey
|
||||
if err := c.get(`sshkey/list`, &keyMap); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, key := range keyMap {
|
||||
keys = append(keys, key)
|
||||
}
|
||||
sort.Sort(sshkeys(keys))
|
||||
return keys, nil
|
||||
}
|
||||
|
||||
// CreateSSHKey creates new SSHKey on Vultr
|
||||
func (c *Client) CreateSSHKey(name, key string) (SSHKey, error) {
|
||||
values := url.Values{
|
||||
"name": {name},
|
||||
"ssh_key": {key},
|
||||
}
|
||||
|
||||
var sshKey SSHKey
|
||||
if err := c.post(`sshkey/create`, values, &sshKey); err != nil {
|
||||
return SSHKey{}, err
|
||||
}
|
||||
sshKey.Name = name
|
||||
sshKey.Key = key
|
||||
|
||||
return sshKey, nil
|
||||
}
|
||||
|
||||
// UpdateSSHKey updates an existing SSHKey entry
|
||||
func (c *Client) UpdateSSHKey(key SSHKey) error {
|
||||
values := url.Values{
|
||||
"SSHKEYID": {key.ID},
|
||||
}
|
||||
if key.Name != "" {
|
||||
values.Add("name", key.Name)
|
||||
}
|
||||
if key.Key != "" {
|
||||
values.Add("ssh_key", key.Key)
|
||||
}
|
||||
|
||||
if err := c.post(`sshkey/update`, values, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteSSHKey deletes an existing SSHKey from Vultr account
|
||||
func (c *Client) DeleteSSHKey(id string) error {
|
||||
values := url.Values{
|
||||
"SSHKEYID": {id},
|
||||
}
|
||||
|
||||
if err := c.post(`sshkey/destroy`, values, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
21  vendor/github.com/akamai/AkamaiOPEN-edgegrid-golang/edgegrid/signer.go  (generated, vendored)
@@ -9,6 +9,7 @@ import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -32,6 +33,26 @@ func AddRequestHeader(config Config, req *http.Request) *http.Request {
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
}
|
||||
|
||||
_, AkamaiCliEnvOK := os.LookupEnv("AKAMAI_CLI")
|
||||
AkamaiCliVersionEnv, AkamaiCliVersionEnvOK := os.LookupEnv("AKAMAI_CLI_VERSION")
|
||||
AkamaiCliCommandEnv, AkamaiCliCommandEnvOK := os.LookupEnv("AKAMAI_CLI_COMMAND")
|
||||
AkamaiCliCommandVersionEnv, AkamaiCliCommandVersionEnvOK := os.LookupEnv("AKAMAI_CLI_COMMAND_VERSION")
|
||||
|
||||
if AkamaiCliEnvOK && AkamaiCliVersionEnvOK {
|
||||
if req.Header.Get("User-Agent") != "" {
|
||||
req.Header.Set("User-Agent", req.Header.Get("User-Agent")+" AkamaiCLI/"+AkamaiCliVersionEnv)
|
||||
} else {
|
||||
req.Header.Set("User-Agent", "AkamaiCLI/"+AkamaiCliVersionEnv)
|
||||
}
|
||||
}
|
||||
if AkamaiCliCommandEnvOK && AkamaiCliCommandVersionEnvOK {
|
||||
if req.Header.Get("User-Agent") != "" {
|
||||
req.Header.Set("User-Agent", req.Header.Get("User-Agent")+" AkamaiCLI-"+AkamaiCliCommandEnv+"/"+AkamaiCliCommandVersionEnv)
|
||||
} else {
|
||||
req.Header.Set("User-Agent", "AkamaiCLI-"+AkamaiCliCommandEnv+"/"+AkamaiCliCommandVersionEnv)
|
||||
}
|
||||
}
|
||||
|
||||
req.Header.Set("Authorization", createAuthHeader(config, req, timestamp, nonce))
|
||||
return req
|
||||
}
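The added block composes the User-Agent header from the AKAMAI_CLI* environment variables. A small standalone reproduction of that composition, with made-up values, to show the resulting header:

```go
package main

import (
	"fmt"
	"net/http"
	"os"
)

func main() {
	// Hypothetical values, as the Akamai CLI wrapper would set them.
	os.Setenv("AKAMAI_CLI", "1")
	os.Setenv("AKAMAI_CLI_VERSION", "1.1.5")
	os.Setenv("AKAMAI_CLI_COMMAND", "dns")
	os.Setenv("AKAMAI_CLI_COMMAND_VERSION", "0.3.0")

	req, _ := http.NewRequest("GET", "https://example.com", nil)
	req.Header.Set("User-Agent", "Go-http-client/1.1")

	// Same composition as AddRequestHeader above: append the CLI version,
	// then the command and its version, to any existing User-Agent.
	if _, ok := os.LookupEnv("AKAMAI_CLI"); ok {
		if v, ok := os.LookupEnv("AKAMAI_CLI_VERSION"); ok {
			req.Header.Set("User-Agent", req.Header.Get("User-Agent")+" AkamaiCLI/"+v)
		}
	}
	if cmd, ok := os.LookupEnv("AKAMAI_CLI_COMMAND"); ok {
		if cv, ok := os.LookupEnv("AKAMAI_CLI_COMMAND_VERSION"); ok {
			req.Header.Set("User-Agent", req.Header.Get("User-Agent")+" AkamaiCLI-"+cmd+"/"+cv)
		}
	}

	fmt.Println(req.Header.Get("User-Agent"))
	// Prints: Go-http-client/1.1 AkamaiCLI/1.1.5 AkamaiCLI-dns/0.3.0
}
```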
|
||||
|
249  vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/api_timeout.go  (generated, vendored, new file)
@@ -0,0 +1,249 @@
|
||||
package sdk
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
var apiTimeouts = `{
|
||||
"ecs": {
|
||||
"ActivateRouterInterface": 10,
|
||||
"AddTags": 61,
|
||||
"AllocateDedicatedHosts": 10,
|
||||
"AllocateEipAddress": 17,
|
||||
"AllocatePublicIpAddress": 36,
|
||||
"ApplyAutoSnapshotPolicy": 10,
|
||||
"AssignIpv6Addresses": 10,
|
||||
"AssignPrivateIpAddresses": 10,
|
||||
"AssociateEipAddress": 17,
|
||||
"AttachClassicLinkVpc": 14,
|
||||
"AttachDisk": 36,
|
||||
"AttachInstanceRamRole": 11,
|
||||
"AttachKeyPair": 16,
|
||||
"AttachNetworkInterface": 16,
|
||||
"AuthorizeSecurityGroupEgress": 16,
|
||||
"AuthorizeSecurityGroup": 16,
|
||||
"CancelAutoSnapshotPolicy": 10,
|
||||
"CancelCopyImage": 10,
|
||||
"CancelPhysicalConnection": 10,
|
||||
"CancelSimulatedSystemEvents": 10,
|
||||
"CancelTask": 10,
|
||||
"ConnectRouterInterface": 10,
|
||||
"ConvertNatPublicIpToEip": 12,
|
||||
"CopyImage": 10,
|
||||
"CreateAutoSnapshotPolicy": 10,
|
||||
"CreateCommand": 16,
|
||||
"CreateDeploymentSet": 16,
|
||||
"CreateDisk": 36,
|
||||
"CreateHpcCluster": 10,
|
||||
"CreateImage": 36,
|
||||
"CreateInstance": 86,
|
||||
"CreateKeyPair": 10,
|
||||
"CreateLaunchTemplate": 10,
|
||||
"CreateLaunchTemplateVersion": 10,
|
||||
"CreateNatGateway": 36,
|
||||
"CreateNetworkInterfacePermission": 13,
|
||||
"CreateNetworkInterface": 16,
|
||||
"CreatePhysicalConnection": 10,
|
||||
"CreateRouteEntry": 17,
|
||||
"CreateRouterInterface": 10,
|
||||
"CreateSecurityGroup": 86,
|
||||
"CreateSimulatedSystemEvents": 10,
|
||||
"CreateSnapshot": 86,
|
||||
"CreateVirtualBorderRouter": 10,
|
||||
"CreateVpc": 16,
|
||||
"CreateVSwitch": 17,
|
||||
"DeactivateRouterInterface": 10,
|
||||
"DeleteAutoSnapshotPolicy": 10,
|
||||
"DeleteBandwidthPackage": 10,
|
||||
"DeleteCommand": 16,
|
||||
"DeleteDeploymentSet": 12,
|
||||
"DeleteDisk": 16,
|
||||
"DeleteHpcCluster": 10,
|
||||
"DeleteImage": 36,
|
||||
"DeleteInstance": 66,
|
||||
"DeleteKeyPairs": 10,
|
||||
"DeleteLaunchTemplate": 10,
|
||||
"DeleteLaunchTemplateVersion": 10,
|
||||
"DeleteNatGateway": 10,
|
||||
"DeleteNetworkInterfacePermission": 10,
|
||||
"DeleteNetworkInterface": 16,
|
||||
"DeletePhysicalConnection": 10,
|
||||
"DeleteRouteEntry": 16,
|
||||
"DeleteRouterInterface": 10,
|
||||
"DeleteSecurityGroup": 87,
|
||||
"DeleteSnapshot": 17,
|
||||
"DeleteVirtualBorderRouter": 10,
|
||||
"DeleteVpc": 17,
|
||||
"DeleteVSwitch": 17,
|
||||
"DescribeAccessPoints": 10,
|
||||
"DescribeAccountAttributes": 10,
|
||||
"DescribeAutoSnapshotPolicyEx": 16,
|
||||
"DescribeAvailableResource": 10,
|
||||
"DescribeBandwidthLimitation": 16,
|
||||
"DescribeBandwidthPackages": 10,
|
||||
"DescribeClassicLinkInstances": 15,
|
||||
"DescribeCloudAssistantStatus": 16,
|
||||
"DescribeClusters": 10,
|
||||
"DescribeCommands": 16,
|
||||
"DescribeDedicatedHosts": 10,
|
||||
"DescribeDedicatedHostTypes": 10,
|
||||
"DescribeDeploymentSets": 26,
|
||||
"DescribeDiskMonitorData": 16,
|
||||
"DescribeDisksFullStatus": 14,
|
||||
"DescribeDisks": 19,
|
||||
"DescribeEipAddresses": 16,
|
||||
"DescribeEipMonitorData": 16,
|
||||
"DescribeEniMonitorData": 10,
|
||||
"DescribeHaVips": 10,
|
||||
"DescribeHpcClusters": 16,
|
||||
"DescribeImageSharePermission": 10,
|
||||
"DescribeImages": 38,
|
||||
"DescribeImageSupportInstanceTypes": 16,
|
||||
"DescribeInstanceAttribute": 36,
|
||||
"DescribeInstanceAutoRenewAttribute": 17,
|
||||
"DescribeInstanceHistoryEvents": 19,
|
||||
"DescribeInstanceMonitorData": 19,
|
||||
"DescribeInstancePhysicalAttribute": 10,
|
||||
"DescribeInstanceRamRole": 11,
|
||||
"DescribeInstancesFullStatus": 14,
|
||||
"DescribeInstances": 10,
|
||||
"DescribeInstanceStatus": 26,
|
||||
"DescribeInstanceTopology": 12,
|
||||
"DescribeInstanceTypeFamilies": 17,
|
||||
"DescribeInstanceTypes": 17,
|
||||
"DescribeInstanceVncPasswd": 10,
|
||||
"DescribeInstanceVncUrl": 36,
|
||||
"DescribeInvocationResults": 16,
|
||||
"DescribeInvocations": 16,
|
||||
"DescribeKeyPairs": 12,
|
||||
"DescribeLaunchTemplates": 16,
|
||||
"DescribeLaunchTemplateVersions": 16,
|
||||
"DescribeLimitation": 36,
|
||||
"DescribeNatGateways": 10,
|
||||
"DescribeNetworkInterfacePermissions": 13,
|
||||
"DescribeNetworkInterfaces": 16,
|
||||
"DescribeNewProjectEipMonitorData": 16,
|
||||
"DescribePhysicalConnections": 10,
|
||||
"DescribePrice": 16,
|
||||
"DescribeRecommendInstanceType": 10,
|
||||
"DescribeRegions": 19,
|
||||
"DescribeRenewalPrice": 16,
|
||||
"DescribeResourceByTags": 10,
|
||||
"DescribeResourcesModification": 17,
|
||||
"DescribeRouterInterfaces": 10,
|
||||
"DescribeRouteTables": 17,
|
||||
"DescribeSecurityGroupAttribute": 133,
|
||||
"DescribeSecurityGroupReferences": 16,
|
||||
"DescribeSecurityGroups": 25,
|
||||
"DescribeSnapshotLinks": 17,
|
||||
"DescribeSnapshotMonitorData": 12,
|
||||
"DescribeSnapshotPackage": 10,
|
||||
"DescribeSnapshots": 26,
|
||||
"DescribeSnapshotsUsage": 26,
|
||||
"DescribeSpotPriceHistory": 22,
|
||||
"DescribeTags": 17,
|
||||
"DescribeTaskAttribute": 10,
|
||||
"DescribeTasks": 11,
|
||||
"DescribeUserBusinessBehavior": 13,
|
||||
"DescribeUserData": 10,
|
||||
"DescribeVirtualBorderRoutersForPhysicalConnection": 10,
|
||||
"DescribeVirtualBorderRouters": 10,
|
||||
"DescribeVpcs": 41,
|
||||
"DescribeVRouters": 17,
|
||||
"DescribeVSwitches": 17,
|
||||
"DescribeZones": 103,
|
||||
"DetachClassicLinkVpc": 14,
|
||||
"DetachDisk": 17,
|
||||
"DetachInstanceRamRole": 10,
|
||||
"DetachKeyPair": 10,
|
||||
"DetachNetworkInterface": 16,
|
||||
"EipFillParams": 19,
|
||||
"EipFillProduct": 13,
|
||||
"EipNotifyPaid": 10,
|
||||
"EnablePhysicalConnection": 10,
|
||||
"ExportImage": 10,
|
||||
"GetInstanceConsoleOutput": 14,
|
||||
"GetInstanceScreenshot": 14,
|
||||
"ImportImage": 29,
|
||||
"ImportKeyPair": 10,
|
||||
"InstallCloudAssistant": 10,
|
||||
"InvokeCommand": 16,
|
||||
"JoinResourceGroup": 10,
|
||||
"JoinSecurityGroup": 66,
|
||||
"LeaveSecurityGroup": 66,
|
||||
"ModifyAutoSnapshotPolicyEx": 10,
|
||||
"ModifyBandwidthPackageSpec": 11,
|
||||
"ModifyCommand": 10,
|
||||
"ModifyDeploymentSetAttribute": 10,
|
||||
"ModifyDiskAttribute": 16,
|
||||
"ModifyDiskChargeType": 13,
|
||||
"ModifyEipAddressAttribute": 14,
|
||||
"ModifyImageAttribute": 10,
|
||||
"ModifyImageSharePermission": 16,
|
||||
"ModifyInstanceAttribute": 22,
|
||||
"ModifyInstanceAutoReleaseTime": 15,
|
||||
"ModifyInstanceAutoRenewAttribute": 16,
|
||||
"ModifyInstanceChargeType": 22,
|
||||
"ModifyInstanceDeployment": 10,
|
||||
"ModifyInstanceNetworkSpec": 36,
|
||||
"ModifyInstanceSpec": 62,
|
||||
"ModifyInstanceVncPasswd": 35,
|
||||
"ModifyInstanceVpcAttribute": 15,
|
||||
"ModifyLaunchTemplateDefaultVersion": 10,
|
||||
"ModifyNetworkInterfaceAttribute": 10,
|
||||
"ModifyPhysicalConnectionAttribute": 10,
|
||||
"ModifyPrepayInstanceSpec": 13,
|
||||
"ModifyRouterInterfaceAttribute": 10,
|
||||
"ModifySecurityGroupAttribute": 10,
|
||||
"ModifySecurityGroupEgressRule": 10,
|
||||
"ModifySecurityGroupPolicy": 10,
|
||||
"ModifySecurityGroupRule": 16,
|
||||
"ModifySnapshotAttribute": 10,
|
||||
"ModifyUserBusinessBehavior": 10,
|
||||
"ModifyVirtualBorderRouterAttribute": 10,
|
||||
"ModifyVpcAttribute": 10,
|
||||
"ModifyVRouterAttribute": 10,
|
||||
"ModifyVSwitchAttribute": 10,
|
||||
"ReActivateInstances": 10,
|
||||
"RebootInstance": 27,
|
||||
"RedeployInstance": 14,
|
||||
"ReInitDisk": 16,
|
||||
"ReleaseDedicatedHost": 10,
|
||||
"ReleaseEipAddress": 16,
|
||||
"ReleasePublicIpAddress": 10,
|
||||
"RemoveTags": 10,
|
||||
"RenewInstance": 19,
|
||||
"ReplaceSystemDisk": 36,
|
||||
"ResetDisk": 36,
|
||||
"ResizeDisk": 11,
|
||||
"RevokeSecurityGroupEgress": 13,
|
||||
"RevokeSecurityGroup": 16,
|
||||
"RunInstances": 86,
|
||||
"StartInstance": 46,
|
||||
"StopInstance": 27,
|
||||
"StopInvocation": 10,
|
||||
"TerminatePhysicalConnection": 10,
|
||||
"TerminateVirtualBorderRouter": 10,
|
||||
"UnassignIpv6Addresses": 10,
|
||||
"UnassignPrivateIpAddresses": 10,
|
||||
"UnassociateEipAddress": 16
|
||||
}
|
||||
}
|
||||
`
|
||||
|
||||
func getAPIMaxTimeout(product, actionName string) (time.Duration, bool) {
|
||||
timeout := make(map[string]map[string]int)
|
||||
err := json.Unmarshal([]byte(apiTimeouts), &timeout)
|
||||
if err != nil {
|
||||
return 0 * time.Millisecond, false
|
||||
}
|
||||
|
||||
obj := timeout[strings.ToLower(product)]
|
||||
if obj != nil && obj[actionName] != 0 {
|
||||
return time.Duration(obj[actionName]) * time.Second, true
|
||||
}
|
||||
|
||||
return 0 * time.Millisecond, false
|
||||
}
|
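getAPIMaxTimeout is unexported, so only code inside this sdk package can reach it. A minimal sketch of such a caller, not part of this diff: the "ecs" product key and the 10-second fallback are assumptions for illustration.

    // Hypothetical in-package caller: prefer the per-action timeout from the table above,
    // otherwise fall back to an assumed default.
    readTimeout, ok := getAPIMaxTimeout("ecs", "RunInstances") // 86s if "ecs" is the product key
    if !ok {
        readTimeout = 10 * time.Second // assumed fallback, not taken from the diff
    }
    httpClient := &http.Client{Timeout: readTimeout}
    _ = httpClient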
12
vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/bearer_token_credential.go
generated
vendored
Normal file
@@ -0,0 +1,12 @@
package credentials

type BearerTokenCredential struct {
    BearerToken string
}

// NewBearerTokenCredential return a BearerTokenCredential object
func NewBearerTokenCredential(token string) *BearerTokenCredential {
    return &BearerTokenCredential{
        BearerToken: token,
    }
}
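A minimal sketch of constructing the new credential type; the token value is a placeholder and wiring the credential into a client is outside this diff.

    package main

    import (
        "fmt"

        "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials"
    )

    func main() {
        cred := credentials.NewBearerTokenCredential("<bearer-token>") // placeholder token
        fmt.Println(cred.BearerToken)
    }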
24
vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/ecs_ram_role.go
generated
vendored
@@ -1,17 +1,5 @@
package credentials

// Deprecated: Use EcsRamRoleCredential in this package instead.
type StsRoleNameOnEcsCredential struct {
    RoleName string
}

// Deprecated: Use NewEcsRamRoleCredential in this package instead.
func NewStsRoleNameOnEcsCredential(roleName string) *StsRoleNameOnEcsCredential {
    return &StsRoleNameOnEcsCredential{
        RoleName: roleName,
    }
}

func (oldCred *StsRoleNameOnEcsCredential) ToEcsRamRoleCredential() *EcsRamRoleCredential {
    return &EcsRamRoleCredential{
        RoleName: oldCred.RoleName,
@@ -27,3 +15,15 @@ func NewEcsRamRoleCredential(roleName string) *EcsRamRoleCredential {
        RoleName: roleName,
    }
}

// Deprecated: Use EcsRamRoleCredential in this package instead.
type StsRoleNameOnEcsCredential struct {
    RoleName string
}

// Deprecated: Use NewEcsRamRoleCredential in this package instead.
func NewStsRoleNameOnEcsCredential(roleName string) *StsRoleNameOnEcsCredential {
    return &StsRoleNameOnEcsCredential{
        RoleName: roleName,
    }
}
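The change above only moves the deprecated type below its replacement. A short migration sketch under that reading, not part of the diff; the role name is a placeholder.

    package main

    import (
        "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials"
    )

    func main() {
        // Old code: deprecated constructor, still available.
        old := credentials.NewStsRoleNameOnEcsCredential("my-ram-role")
        cred := old.ToEcsRamRoleCredential()

        // New code: construct the replacement directly.
        cred = credentials.NewEcsRamRoleCredential("my-ram-role")
        _ = cred
    }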
30
vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider/env.go
generated
vendored
Normal file
@@ -0,0 +1,30 @@
package provider

import (
    "errors"
    "os"

    "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials"

    "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth"
)

type EnvProvider struct{}

var ProviderEnv = new(EnvProvider)

func NewEnvProvider() Provider {
    return &EnvProvider{}
}

func (p *EnvProvider) Resolve() (auth.Credential, error) {
    accessKeyID, ok1 := os.LookupEnv(ENVAccessKeyID)
    accessKeySecret, ok2 := os.LookupEnv(ENVAccessKeySecret)
    if !ok1 || !ok2 {
        return nil, nil
    }
    if accessKeyID == "" || accessKeySecret == "" {
        return nil, errors.New("Environmental variable (ALIBABACLOUD_ACCESS_KEY_ID or ALIBABACLOUD_ACCESS_KEY_SECRET) is empty")
    }
    return credentials.NewAccessKeyCredential(accessKeyID, accessKeySecret), nil
}
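A minimal usage sketch for the new environment provider, not part of the diff; resolution succeeds only when both ALIBABA_CLOUD_ACCESS_KEY_ID and ALIBABA_CLOUD_ACCESS_KEY_SECRET are set and non-empty.

    package main

    import (
        "fmt"

        "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider"
    )

    func main() {
        cred, err := provider.NewEnvProvider().Resolve()
        if err != nil {
            panic(err)
        }
        if cred == nil {
            fmt.Println("environment variables not set; try another provider")
            return
        }
        fmt.Printf("resolved %T\n", cred) // *credentials.AccessKeyCredential
    }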
92
vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider/instance_credentials.go
generated
vendored
Normal file
@@ -0,0 +1,92 @@
package provider

import (
    "encoding/json"
    "errors"
    "fmt"
    "io/ioutil"
    "net/http"
    "os"
    "time"

    "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth"
    "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials"
)

var securityCredURL = "http://100.100.100.200/latest/meta-data/ram/security-credentials/"

type InstanceCredentialsProvider struct{}

var ProviderInstance = new(InstanceCredentialsProvider)

var HookGet = func(fn func(string) (int, []byte, error)) func(string) (int, []byte, error) {
    return fn
}

func NewInstanceCredentialsProvider() Provider {
    return &InstanceCredentialsProvider{}
}

func (p *InstanceCredentialsProvider) Resolve() (auth.Credential, error) {
    roleName, ok := os.LookupEnv(ENVEcsMetadata)
    if !ok {
        return nil, nil
    }
    if roleName == "" {
        return nil, errors.New("Environmental variable 'ALIBABA_CLOUD_ECS_METADATA' are empty")
    }
    status, content, err := HookGet(get)(securityCredURL + roleName)
    if err != nil {
        return nil, err
    }
    if status != 200 {
        if status == 404 {
            return nil, fmt.Errorf("The role was not found in the instance")
        }
        return nil, fmt.Errorf("Received %d when getting security credentials for %s", status, roleName)
    }
    body := make(map[string]interface{})

    if err := json.Unmarshal(content, &body); err != nil {
        return nil, err
    }

    accessKeyID, err := extractString(body, "AccessKeyId")
    if err != nil {
        return nil, err
    }
    accessKeySecret, err := extractString(body, "AccessKeySecret")
    if err != nil {
        return nil, err
    }
    securityToken, err := extractString(body, "SecurityToken")
    if err != nil {
        return nil, err
    }

    return credentials.NewStsTokenCredential(accessKeyID, accessKeySecret, securityToken), nil
}

func get(url string) (status int, content []byte, err error) {
    httpClient := http.DefaultClient
    httpClient.Timeout = time.Second * 1
    resp, err := httpClient.Get(url)
    if err != nil {
        return
    }
    defer resp.Body.Close()
    content, err = ioutil.ReadAll(resp.Body)
    return resp.StatusCode, content, err
}

func extractString(m map[string]interface{}, key string) (string, error) {
    raw, ok := m[key]
    if !ok {
        return "", fmt.Errorf("%s not in map", key)
    }
    str, ok := raw.(string)
    if !ok {
        return "", fmt.Errorf("%s is not a string in map", key)
    }
    return str, nil
}
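A usage sketch for the instance metadata provider, not part of the diff. The role name is a placeholder; on a real ECS instance ALIBABA_CLOUD_ECS_METADATA should name the RAM role attached to the machine, and the 100.100.100.200 endpoint above must be reachable.

    package main

    import (
        "fmt"
        "os"

        "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider"
    )

    func main() {
        os.Setenv("ALIBABA_CLOUD_ECS_METADATA", "my-instance-role") // placeholder role name
        cred, err := provider.NewInstanceCredentialsProvider().Resolve()
        if err != nil {
            panic(err) // metadata endpoint unreachable, role missing, etc.
        }
        fmt.Printf("resolved %T\n", cred) // *credentials.StsTokenCredential
    }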
158
vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider/profile_credentials.go
generated
vendored
Normal file
@@ -0,0 +1,158 @@
package provider

import (
    "bufio"
    "errors"
    "os"
    "runtime"
    "strings"

    "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth"
    "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials"

    ini "gopkg.in/ini.v1"
)

type ProfileProvider struct {
    Profile string
}

var ProviderProfile = NewProfileProvider()

// NewProfileProvider receive zero or more parameters,
// when length of name is 0, the value of field Profile will be "default",
// and when there are multiple inputs, the function will take the
// first one and discard the other values.
func NewProfileProvider(name ...string) Provider {
    p := new(ProfileProvider)
    if len(name) == 0 {
        p.Profile = "default"
    } else {
        p.Profile = name[0]
    }
    return p
}

// Resolve implements the Provider interface
// when credential type is rsa_key_pair, the content of private_key file
// must be able to be parsed directly into the required string
// that NewRsaKeyPairCredential function needed
func (p *ProfileProvider) Resolve() (auth.Credential, error) {
    path, ok := os.LookupEnv(ENVCredentialFile)
    if !ok {
        path, err := checkDefaultPath()
        if err != nil {
            return nil, err
        }
        if path == "" {
            return nil, nil
        }
    } else if path == "" {
        return nil, errors.New("Environment variable '" + ENVCredentialFile + "' cannot be empty")
    }

    ini, err := ini.Load(path)
    if err != nil {
        return nil, errors.New("ERROR: Can not open file" + err.Error())
    }

    section, err := ini.GetSection(p.Profile)
    if err != nil {
        return nil, errors.New("ERROR: Can not load section" + err.Error())
    }

    value, err := section.GetKey("type")
    if err != nil {
        return nil, errors.New("ERROR: Can not find credential type" + err.Error())
    }

    switch value.String() {
    case "access_key":
        value1, err1 := section.GetKey("access_key_id")
        value2, err2 := section.GetKey("access_key_secret")
        if err1 != nil || err2 != nil {
            return nil, errors.New("ERROR: Failed to get value")
        }
        if value1.String() == "" || value2.String() == "" {
            return nil, errors.New("ERROR: Value can't be empty")
        }
        return credentials.NewAccessKeyCredential(value1.String(), value2.String()), nil
    case "ecs_ram_role":
        value1, err1 := section.GetKey("role_name")
        if err1 != nil {
            return nil, errors.New("ERROR: Failed to get value")
        }
        if value1.String() == "" {
            return nil, errors.New("ERROR: Value can't be empty")
        }
        return credentials.NewEcsRamRoleCredential(value1.String()), nil
    case "ram_role_arn":
        value1, err1 := section.GetKey("access_key_id")
        value2, err2 := section.GetKey("access_key_secret")
        value3, err3 := section.GetKey("role_arn")
        value4, err4 := section.GetKey("role_session_name")
        if err1 != nil || err2 != nil || err3 != nil || err4 != nil {
            return nil, errors.New("ERROR: Failed to get value")
        }
        if value1.String() == "" || value2.String() == "" || value3.String() == "" || value4.String() == "" {
            return nil, errors.New("ERROR: Value can't be empty")
        }
        return credentials.NewRamRoleArnCredential(value1.String(), value2.String(), value3.String(), value4.String(), 3600), nil
    case "rsa_key_pair":
        value1, err1 := section.GetKey("public_key_id")
        value2, err2 := section.GetKey("private_key_file")
        if err1 != nil || err2 != nil {
            return nil, errors.New("ERROR: Failed to get value")
        }
        if value1.String() == "" || value2.String() == "" {
            return nil, errors.New("ERROR: Value can't be empty")
        }
        file, err := os.Open(value2.String())
        if err != nil {
            return nil, errors.New("ERROR: Can not get private_key")
        }
        defer file.Close()
        var privateKey string
        scan := bufio.NewScanner(file)
        var data string
        for scan.Scan() {
            if strings.HasPrefix(scan.Text(), "----") {
                continue
            }
            data += scan.Text() + "\n"
        }
        return credentials.NewRsaKeyPairCredential(privateKey, value1.String(), 3600), nil
    default:
        return nil, errors.New("ERROR: Failed to get credential")
    }
}

// GetHomePath return home directory according to the system.
// if the environmental virables does not exist, will return empty
func GetHomePath() string {
    if runtime.GOOS == "windows" {
        path, ok := os.LookupEnv("USERPROFILE")
        if !ok {
            return ""
        }
        return path
    }
    path, ok := os.LookupEnv("HOME")
    if !ok {
        return ""
    }
    return path
}

func checkDefaultPath() (path string, err error) {
    path = GetHomePath()
    if path == "" {
        return "", errors.New("The default credential file path is invalid")
    }
    path = strings.Replace("~/.alibabacloud/credentials", "~", path, 1)
    _, err = os.Stat(path)
    if err != nil {
        return "", nil
    }
    return path, nil
}
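A usage sketch for the profile provider, not part of the diff, assuming a credentials file like the one sketched in the comments; paths and values are placeholders.

    package main

    import (
        "fmt"

        "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider"
    )

    func main() {
        // ~/.alibabacloud/credentials (or the file named by ALIBABA_CLOUD_CREDENTIALS_FILE):
        //   [default]
        //   type = access_key
        //   access_key_id = <id>
        //   access_key_secret = <secret>
        cred, err := provider.NewProfileProvider().Resolve() // reads the "default" section
        if err != nil {
            panic(err)
        }
        fmt.Printf("resolved %T\n", cred)
    }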
19
vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider/provider.go
generated
vendored
Normal file
@@ -0,0 +1,19 @@
package provider

import (
    "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth"
)

//Environmental virables that may be used by the provider
const (
    ENVAccessKeyID     = "ALIBABA_CLOUD_ACCESS_KEY_ID"
    ENVAccessKeySecret = "ALIBABA_CLOUD_ACCESS_KEY_SECRET"
    ENVCredentialFile  = "ALIBABA_CLOUD_CREDENTIALS_FILE"
    ENVEcsMetadata     = "ALIBABA_CLOUD_ECS_METADATA"
    PATHCredentialFile = "~/.alibabacloud/credentials"
)

// When you want to customize the provider, you only need to implement the method of the interface.
type Provider interface {
    Resolve() (auth.Credential, error)
}
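Per the comment above, a custom provider only has to implement Resolve. A sketch of a hypothetical static provider that follows the same nil-means-skip convention as the built-in ones; it is not part of the SDK.

    package main

    import (
        "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth"
        "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials"
        "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider"
    )

    // staticProvider is a hypothetical example, not part of the SDK.
    type staticProvider struct {
        id, secret string
    }

    // Compile-time check that staticProvider satisfies the new interface.
    var _ provider.Provider = staticProvider{}

    func (s staticProvider) Resolve() (auth.Credential, error) {
        if s.id == "" || s.secret == "" {
            return nil, nil // nothing to offer; a chain would move on to the next provider
        }
        return credentials.NewAccessKeyCredential(s.id, s.secret), nil
    }

    func main() {
        cred, _ := staticProvider{id: "<id>", secret: "<secret>"}.Resolve()
        _ = cred
    }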
34
vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider/provider_chain.go
generated
vendored
Normal file
@@ -0,0 +1,34 @@
package provider

import (
    "errors"

    "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth"
)

type ProviderChain struct {
    Providers []Provider
}

var defaultproviders = []Provider{ProviderEnv, ProviderProfile, ProviderInstance}
var DefaultChain = NewProviderChain(defaultproviders)

func NewProviderChain(providers []Provider) Provider {
    return &ProviderChain{
        Providers: providers,
    }
}

func (p *ProviderChain) Resolve() (auth.Credential, error) {
    for _, provider := range p.Providers {
        creds, err := provider.Resolve()
        if err != nil {
            return nil, err
        } else if err == nil && creds == nil {
            continue
        }
        return creds, err
    }
    return nil, errors.New("No credential found")

}
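A usage sketch for the chain, not part of the diff; the default order is the one shown above: environment variables, then the profile file, then ECS instance metadata.

    package main

    import (
        "fmt"

        "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider"
    )

    func main() {
        cred, err := provider.DefaultChain.Resolve()
        if err != nil {
            // "No credential found" when every provider returned nil
            panic(err)
        }
        fmt.Printf("resolved %T\n", cred)
    }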
@@ -15,6 +15,7 @@ type RamRoleArnCredential struct {
    RoleArn               string
    RoleSessionName       string
    RoleSessionExpiration int
    Policy                string
}

// Deprecated: Use RamRoleArnCredential in this package instead.
@@ -47,3 +48,14 @@ func NewRamRoleArnCredential(accessKeyId, accessKeySecret, roleArn, roleSessionN
        RoleSessionExpiration: roleSessionExpiration,
    }
}

func NewRamRoleArnWithPolicyCredential(accessKeyId, accessKeySecret, roleArn, roleSessionName, policy string, roleSessionExpiration int) *RamRoleArnCredential {
    return &RamRoleArnCredential{
        AccessKeyId:           accessKeyId,
        AccessKeySecret:       accessKeySecret,
        RoleArn:               roleArn,
        RoleSessionName:       roleSessionName,
        RoleSessionExpiration: roleSessionExpiration,
        Policy:                policy,
    }
}

Some files were not shown because too many files have changed in this diff.